drivers/rtc/interface.c
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;
        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->read_time)
                err = -EINVAL;
        else {
                memset(tm, 0, sizeof(struct rtc_time));
                err = rtc->ops->read_time(rtc->dev.parent, tm);
        }
        return err;
}

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        err = __rtc_read_time(rtc, tm);
        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        err = rtc_valid_tm(tm);
        if (err != 0)
                return err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (rtc->ops->set_time)
                err = rtc->ops->set_time(rtc->dev.parent, tm);
        else if (rtc->ops->set_mmss) {
                unsigned long secs;
                err = rtc_tm_to_time(tm, &secs);
                if (err == 0)
                        err = rtc->ops->set_mmss(rtc->dev.parent, secs);
        } else
                err = -EINVAL;

        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
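
/*
 * Example (illustrative sketch, not part of the upstream file): a caller that
 * already holds a struct rtc_device pointer can use rtc_read_time() and
 * rtc_set_time() with a struct rtc_time on the stack. Rounding the seconds
 * field down before writing is only an assumption made for the example.
 *
 *        struct rtc_time tm;
 *        int err = rtc_read_time(rtc, &tm);
 *
 *        if (err == 0) {
 *                tm.tm_sec = 0;
 *                err = rtc_set_time(rtc, &tm);
 *        }
 */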

int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (rtc->ops->set_mmss)
                err = rtc->ops->set_mmss(rtc->dev.parent, secs);
        else if (rtc->ops->read_time && rtc->ops->set_time) {
                struct rtc_time new, old;

                err = rtc->ops->read_time(rtc->dev.parent, &old);
                if (err == 0) {
                        rtc_time_to_tm(secs, &new);

                        /*
                         * Avoid writing when we're going to change the day of
                         * the month. We will retry in the next minute. This
                         * basically means that the RTC must not drift by more
                         * than 1 minute in 11 minutes.
                         */
                        if (!((old.tm_hour == 23 && old.tm_min == 59) ||
                              (new.tm_hour == 23 && new.tm_min == 59)))
                                err = rtc->ops->set_time(rtc->dev.parent,
                                                         &new);
                }
        } else
                err = -EINVAL;

        mutex_unlock(&rtc->ops_lock);

        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);
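
/*
 * Example (illustrative sketch): a hypothetical caller that periodically
 * nudges the RTC toward system time. get_seconds() is used purely for
 * illustration; when the driver lacks set_mmss(), the fallback path above
 * skips writes close to a day-of-month change and the caller simply retries
 * on its next pass.
 *
 *        int err = rtc_set_mmss(rtc, get_seconds());
 *
 *        if (err)
 *                dev_dbg(&rtc->dev, "rtc_set_mmss failed: %d\n", err);
 */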

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
        alarm->enabled = rtc->aie_timer.enabled;
        if (alarm->enabled)
                alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
        mutex_unlock(&rtc->ops_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        struct rtc_time tm;
        long now, scheduled;
        int err;

        err = rtc_valid_tm(&alarm->time);
        if (err)
                return err;
        rtc_tm_to_time(&alarm->time, &scheduled);

        /* Make sure we're not setting alarms in the past */
        err = __rtc_read_time(rtc, &tm);
        rtc_tm_to_time(&tm, &now);
        if (scheduled <= now)
                return -ETIME;
        /*
         * XXX - We just checked that the alarm time is not in the past,
         * but there is still a race window: the alarm may be set for the
         * next second, and that second may tick over right here, before
         * we program the alarm.
         */

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->set_alarm)
                err = -EINVAL;
        else
                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

        return err;
}

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;

        err = rtc_valid_tm(&alarm->time);
        if (err != 0)
                return err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
        if (rtc->aie_timer.enabled) {
                rtc_timer_remove(rtc, &rtc->aie_timer);
                rtc->aie_timer.enabled = 0;
        }
        rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
        rtc->aie_timer.period = ktime_set(0, 0);
        if (alarm->enabled) {
                rtc->aie_timer.enabled = 1;
                rtc_timer_enqueue(rtc, &rtc->aie_timer);
        }
        mutex_unlock(&rtc->ops_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
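
/*
 * Example (illustrative sketch): arming a one-shot wakeup alarm one minute
 * from the RTC's current time. Error handling is trimmed and the surrounding
 * context (where "rtc" comes from) is assumed.
 *
 *        struct rtc_wkalrm alrm;
 *        unsigned long now;
 *
 *        rtc_read_time(rtc, &alrm.time);
 *        rtc_tm_to_time(&alrm.time, &now);
 *        rtc_time_to_tm(now + 60, &alrm.time);
 *        alrm.enabled = 1;
 *        if (rtc_set_alarm(rtc, &alrm))
 *                dev_warn(&rtc->dev, "could not set alarm\n");
 */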

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
        int err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (rtc->aie_timer.enabled != enabled) {
                if (enabled) {
                        rtc->aie_timer.enabled = 1;
                        rtc_timer_enqueue(rtc, &rtc->aie_timer);
                } else {
                        rtc_timer_remove(rtc, &rtc->aie_timer);
                        rtc->aie_timer.enabled = 0;
                }
        }

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->alarm_irq_enable)
                err = -EINVAL;
        else
                err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
        int err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        /* make sure we're changing state */
        if (rtc->uie_rtctimer.enabled == enabled)
                goto out;

        if (enabled) {
                struct rtc_time tm;
                ktime_t now, onesec;

                __rtc_read_time(rtc, &tm);
                onesec = ktime_set(1, 0);
                now = rtc_tm_to_ktime(tm);
                rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
                rtc->uie_rtctimer.period = ktime_set(1, 0);
                rtc->uie_rtctimer.enabled = 1;
                rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
        } else {
                rtc_timer_remove(rtc, &rtc->uie_rtctimer);
                rtc->uie_rtctimer.enabled = 0;
        }

out:
        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);


/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 * @num: number of IRQs being reported (usually one)
 * @mode: one of RTC_AF, RTC_UF or RTC_PF
 *
 * This function is called when an AIE, UIE or PIE mode interrupt
 * has occurred (or been emulated).
 *
 * Triggers the registered irq_task function callback.
 */
static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
        unsigned long flags;

        /* mark one irq of the appropriate mode */
        spin_lock_irqsave(&rtc->irq_lock, flags);
        rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
        spin_unlock_irqrestore(&rtc->irq_lock, flags);

        /* call the task func */
        spin_lock_irqsave(&rtc->irq_task_lock, flags);
        if (rtc->irq_task)
                rtc->irq_task->func(rtc->irq_task->private_data);
        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

        wake_up_interruptible(&rtc->irq_queue);
        kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}


/**
 * rtc_aie_update_irq - AIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the aie_timer expires.
 */
void rtc_aie_update_irq(void *private)
{
        struct rtc_device *rtc = (struct rtc_device *)private;
        rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}


/**
 * rtc_uie_update_irq - UIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the uie_timer expires.
 */
void rtc_uie_update_irq(void *private)
{
        struct rtc_device *rtc = (struct rtc_device *)private;
        rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}


/**
 * rtc_pie_update_irq - PIE mode hrtimer hook
 * @timer: pointer to the pie mode hrtimer
 *
 * This function is used to emulate PIE mode interrupts
 * using an hrtimer. This function is called when the periodic
 * hrtimer expires.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
        struct rtc_device *rtc;
        ktime_t period;
        int count;
        rtc = container_of(timer, struct rtc_device, pie_timer);

        period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
        count = hrtimer_forward_now(timer, period);

        rtc_handle_legacy_irq(rtc, count, RTC_PF);

        return HRTIMER_RESTART;
}

/**
 * rtc_update_irq - Triggered when an RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: any
 */
void rtc_update_irq(struct rtc_device *rtc,
                unsigned long num, unsigned long events)
{
        schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
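
/*
 * Example (illustrative sketch): how a chip driver's interrupt handler is
 * expected to report an alarm event. "foo_rtc", its fields and its helper
 * are hypothetical; only the rtc_update_irq() call reflects this interface.
 *
 *        static irqreturn_t foo_rtc_irq(int irq, void *dev_id)
 *        {
 *                struct foo_rtc *priv = dev_id;
 *
 *                foo_rtc_clear_irq(priv);
 *                rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
 *                return IRQ_HANDLED;
 *        }
 */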

static int __rtc_match(struct device *dev, void *data)
{
        char *name = (char *)data;

        if (strcmp(dev_name(dev), name) == 0)
                return 1;
        return 0;
}

struct rtc_device *rtc_class_open(char *name)
{
        struct device *dev;
        struct rtc_device *rtc = NULL;

        dev = class_find_device(rtc_class, NULL, name, __rtc_match);
        if (dev)
                rtc = to_rtc_device(dev);

        if (rtc) {
                if (!try_module_get(rtc->owner)) {
                        put_device(dev);
                        rtc = NULL;
                }
        }

        return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
        module_put(rtc->owner);
        put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);
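
/*
 * Example (illustrative sketch): the open/use/close pairing expected of
 * in-kernel users. The device name "rtc0" is an assumption; real callers
 * usually take it from a config option or platform data.
 *
 *        struct rtc_device *rtc = rtc_class_open("rtc0");
 *        struct rtc_time tm;
 *        int err = -ENODEV;
 *
 *        if (rtc) {
 *                err = rtc_read_time(rtc, &tm);
 *                rtc_class_close(rtc);
 *        }
 */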

int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
{
        int retval = -EBUSY;

        if (task == NULL || task->func == NULL)
                return -EINVAL;

        /* Cannot register while the char dev is in use */
        if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
                return -EBUSY;

        spin_lock_irq(&rtc->irq_task_lock);
        if (rtc->irq_task == NULL) {
                rtc->irq_task = task;
                retval = 0;
        }
        spin_unlock_irq(&rtc->irq_task_lock);

        clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);

        return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
{
        spin_lock_irq(&rtc->irq_task_lock);
        if (rtc->irq_task == task)
                rtc->irq_task = NULL;
        spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

/**
 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of periodic IRQ task->func() callbacks.
 */
int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&rtc->irq_task_lock, flags);
        if (rtc->irq_task != NULL && task == NULL)
                err = -EBUSY;
        if (rtc->irq_task != task)
                err = -EACCES;

        /* only touch the PIE hrtimer if the ownership checks passed */
        if (err == 0) {
                if (enabled) {
                        ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
                        hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
                } else {
                        hrtimer_cancel(&rtc->pie_timer);
                }
                rtc->pie_enabled = enabled;
        }
        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

        return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

/**
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @freq: positive frequency with which task->func() will be called
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&rtc->irq_task_lock, flags);
        if (rtc->irq_task != NULL && task == NULL)
                err = -EBUSY;
        if (rtc->irq_task != task)
                err = -EACCES;
        if (err == 0) {
                rtc->irq_freq = freq;
                if (rtc->pie_enabled) {
                        ktime_t period;
                        hrtimer_cancel(&rtc->pie_timer);
                        period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
                        hrtimer_start(&rtc->pie_timer, period,
                                        HRTIMER_MODE_REL);
                }
        }
        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
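
/*
 * Example (illustrative sketch): the legacy periodic-IRQ sequence a user of
 * this interface would follow. "my_task", "my_func" and "my_data" are
 * hypothetical, and 16 Hz is just a sample power-of-two rate.
 *
 *        static int my_data;
 *        static struct rtc_task my_task = {
 *                .func = my_func,
 *                .private_data = &my_data,
 *        };
 *        int err;
 *
 *        err = rtc_irq_register(rtc, &my_task);
 *        if (!err)
 *                err = rtc_irq_set_freq(rtc, &my_task, 16);
 *        if (!err)
 *                err = rtc_irq_set_state(rtc, &my_task, 1);
 *        ...
 *        rtc_irq_set_state(rtc, &my_task, 0);
 *        rtc_irq_unregister(rtc, &my_task);
 */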

/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
        timerqueue_add(&rtc->timerqueue, &timer->node);
        if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
                struct rtc_wkalrm alarm;
                int err;
                alarm.time = rtc_ktime_to_tm(timer->node.expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        schedule_work(&rtc->irqwork);
        }
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
        timerqueue_del(&rtc->timerqueue, &timer->node);

        if (next == &timer->node) {
                struct rtc_wkalrm alarm;
                int err;
                next = timerqueue_getnext(&rtc->timerqueue);
                if (!next)
                        return;
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        schedule_work(&rtc->irqwork);
        }
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: the rtc device's irqwork work_struct
 *
 * Expires rtc timers. Reprograms the next alarm event if needed.
 * Called via the irqwork work item.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
        struct rtc_timer *timer;
        struct timerqueue_node *next;
        ktime_t now;
        struct rtc_time tm;

        struct rtc_device *rtc =
                container_of(work, struct rtc_device, irqwork);

        mutex_lock(&rtc->ops_lock);
again:
        __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
        while ((next = timerqueue_getnext(&rtc->timerqueue))) {
                if (next->expires.tv64 > now.tv64)
                        break;

                /* expire timer */
                timer = container_of(next, struct rtc_timer, node);
                timerqueue_del(&rtc->timerqueue, &timer->node);
                timer->enabled = 0;
                if (timer->task.func)
                        timer->task.func(timer->task.private_data);

                /* Re-add/fwd periodic timers */
                if (ktime_to_ns(timer->period)) {
                        timer->node.expires = ktime_add(timer->node.expires,
                                        timer->period);
                        timer->enabled = 1;
                        timerqueue_add(&rtc->timerqueue, &timer->node);
                }
        }

        /* Set next alarm */
        if (next) {
                struct rtc_wkalrm alarm;
                int err;
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        goto again;
        }

        mutex_unlock(&rtc->ops_lock);
}

/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @data: private data passed to function pointer
 *
 * Kernel interface to initializing an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data)
{
        timerqueue_init(&timer->node);
        timer->enabled = 0;
        timer->task.func = f;
        timer->task.private_data = data;
}

/* rtc_timer_start - Sets an rtc_timer to fire in the future
 * @rtc: rtc device to be used
 * @timer: timer being set
 * @expires: time at which to expire the timer
 * @period: period at which the timer recurs
 *
 * Kernel interface to set an rtc_timer
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
                        ktime_t expires, ktime_t period)
{
        int ret = 0;
        mutex_lock(&rtc->ops_lock);
        if (timer->enabled)
                rtc_timer_remove(rtc, timer);

        timer->node.expires = expires;
        timer->period = period;

        timer->enabled = 1;
        rtc_timer_enqueue(rtc, timer);

        mutex_unlock(&rtc->ops_lock);
        return ret;
}

/* rtc_timer_cancel - Stops an rtc_timer
 * @rtc: rtc device to be used
 * @timer: timer being cancelled
 *
 * Kernel interface to cancel an rtc_timer
 */
int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
{
        int ret = 0;
        mutex_lock(&rtc->ops_lock);
        if (timer->enabled)
                rtc_timer_remove(rtc, timer);
        timer->enabled = 0;
        mutex_unlock(&rtc->ops_lock);
        return ret;
}
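
/*
 * Example (illustrative sketch): how in-kernel code might drive the rtc_timer
 * interface above. The callback and variable names are assumptions; the
 * callback runs from rtc_timer_do_work() with ops_lock held.
 *
 *        static void my_timer_func(void *p)
 *        {
 *                ...
 *        }
 *
 *        static struct rtc_timer my_timer;
 *        struct rtc_time tm;
 *        ktime_t now;
 *
 *        rtc_timer_init(&my_timer, my_timer_func, NULL);
 *        rtc_read_time(rtc, &tm);
 *        now = rtc_tm_to_ktime(tm);
 *        rtc_timer_start(rtc, &my_timer, ktime_add(now, ktime_set(10, 0)),
 *                        ktime_set(0, 0));
 *        ...
 *        rtc_timer_cancel(rtc, &my_timer);
 */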