PM / Runtime: Combine runtime PM entry points
[deliverable/linux.git] drivers/base/power/runtime.c
/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int rpm_resume(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 *
 * Return a negative error code if suspend is not allowed, 1 if the device is
 * already suspended and 0 otherwise.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
	    && dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

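/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * subsystem's ->runtime_idle() callback reached from the dispatch above
 * typically checks its own idleness criteria and, if they hold, converts
 * the notification into an actual suspend:
 *
 *	static int example_bus_runtime_idle(struct device *dev)
 *	{
 *		if (example_bus_device_is_idle(dev))
 *			pm_runtime_suspend(dev);
 *		return 0;
 *	}
 *
 * The example_* names are made up; the prototype is that of the runtime_idle
 * member of struct dev_pm_ops.
 */
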
/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended. If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
 * pending idle notification. If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);
	dev->power.deferred_resume = false;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY) {
			if (dev->power.timer_expires == 0)
				notify = true;
			dev->power.runtime_error = 0;
		} else {
			pm_runtime_cancel_pending(dev);
		}
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		rpm_idle(dev, 0);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

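/*
 * Illustrative sketch (hypothetical driver code): a ->runtime_suspend()
 * callback reached from the dispatch above is expected to put the device
 * into a low-power state and return 0, or to veto the suspend by returning
 * -EBUSY or -EAGAIN, in which case the code above resets the status to
 * RPM_ACTIVE without recording an error:
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *
 *		if (example_hw_busy(priv))
 *			return -EBUSY;
 *
 *		example_hw_save_state(priv);
 *		example_hw_power_off(priv);
 *		return 0;
 *	}
 *
 * All example_* identifiers are made up for illustration.
 */
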
/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed. Cancel
 * any scheduled or pending requests. If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish. If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly. Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

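/*
 * Illustrative sketch (hypothetical driver code): the matching
 * ->runtime_resume() callback powers the device back up and restores the
 * state saved at suspend time.  Per the error handling above, a nonzero
 * return leaves the device in RPM_SUSPENDED with power.runtime_error set,
 * which blocks further run-time PM of the device until the error is
 * cleared, e.g. by __pm_runtime_set_status():
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *
 *		example_hw_power_on(priv);
 *		example_hw_restore_state(priv);
 *		return 0;
 *	}
 */
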
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* Avoid zero, which means "no timer scheduled" to the timer code. */
	dev->power.timer_expires += !dev->power.timer_expires;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

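/*
 * Usage sketch (hypothetical driver code): a driver that has just finished
 * an I/O burst can give the device a grace period instead of suspending it
 * immediately:
 *
 *	static void example_io_done(struct device *dev)
 *	{
 *		pm_schedule_suspend(dev, 250);
 *	}
 *
 * If the device is used again within those 250 ms, the resume path cancels
 * the timer through pm_runtime_cancel_pending().
 */
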
/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero. Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

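/*
 * At the time of this change the static inline helpers in
 * <linux/pm_runtime.h> are thin wrappers around these entry points,
 * roughly:
 *
 *	pm_runtime_idle(dev)	 -> __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)	 -> __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put_sync(dev) -> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev)	 -> __pm_runtime_idle(dev,
 *						RPM_GET_PUT | RPM_ASYNC)
 */
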
/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Carry out a suspend, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

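/*
 * Usage sketch (hypothetical driver code): the RPM_GET_PUT flag is what the
 * pm_runtime_get*()/pm_runtime_put*() helpers build on.  A typical I/O path
 * keeps the device powered for the duration of a transfer:
 *
 *	static int example_start_xfer(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		ret = example_do_xfer(dev);
 *		pm_runtime_put(dev);
 *		return ret;
 *	}
 *
 * example_do_xfer() is made up; note that pm_runtime_get_sync() increments
 * the usage count even on failure, hence the put_noidle() in the error path.
 */
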
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

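/*
 * Usage sketch (hypothetical): this entry point is normally reached through
 * the pm_runtime_set_active()/pm_runtime_set_suspended() helpers.  A probe
 * routine for a device left powered on by the firmware might declare that
 * fact before enabling run-time PM, so that the core's view matches the
 * actual hardware state:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */
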
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

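/*
 * Usage sketch: pm_runtime_barrier() is intended for code that must not race
 * with asynchronous run-time PM activity, e.g. just before a driver is
 * unbound or a system-wide power transition starts:
 *
 *	if (pm_runtime_barrier(dev))
 *		dev_dbg(dev, "pending resume request was carried out\n");
 *
 * On return, no run-time PM work for the device is pending or in progress.
 */
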
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time
	 * PM shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

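/*
 * Usage sketch (hypothetical driver code): pm_runtime_enable() only
 * decrements power.disable_depth, so disable/enable calls may nest and must
 * balance.  A remove routine typically undoes the enable done at probe time
 * via the pm_runtime_disable() helper, which wraps
 * __pm_runtime_disable(dev, true):
 *
 *	static int example_remove(struct device *dev)
 *	{
 *		pm_runtime_disable(dev);
 *		example_teardown(dev);
 *		return 0;
 *	}
 */
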
/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

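/*
 * pm_runtime_forbid() and pm_runtime_allow() back the device's power/control
 * attribute in sysfs ("on" forbids run-time PM, "auto" allows it):
 *
 *	echo auto > /sys/devices/.../power/control
 *
 * The pair balances itself through the usage count: forbid takes a reference
 * and resumes the device, allow drops the reference and sends an idle
 * notification if it was the last one.
 */
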
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 *
 * The device starts out in the RPM_SUSPENDED state with run-time PM disabled
 * (disable_depth is set to one); pm_runtime_enable() and
 * __pm_runtime_set_status() are used to change that.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
	    (unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}