2 * drivers/misc/logger.c
6 * Copyright (C) 2007-2008 Google, Inc.
8 * Robert Love <rlove@google.com>
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #define pr_fmt(fmt) "logger: " fmt
22 #include <linux/sched.h>
23 #include <linux/module.h>
25 #include <linux/miscdevice.h>
26 #include <linux/uaccess.h>
27 #include <linux/poll.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/vmalloc.h>
31 #include <linux/aio.h>
34 #include <asm/ioctls.h>
37 * struct logger_log - represents a specific log, such as 'main' or 'radio'
38 * @buffer: The actual ring buffer
39 * @misc: The "misc" device representing the log
40 * @wq: The wait queue for @readers
41 * @readers: This log's readers
42 * @mutex: The mutex that protects the @buffer
43 * @w_off: The current write head offset
44 * @head: The head, or location that readers start reading at.
45 * @size: The size of the log
46 * @logs: The list of log channels
48 * This structure lives from module insertion until module removal, so it does
49 * not need additional reference counting. The structure is protected by the
53 unsigned char *buffer
;
54 struct miscdevice misc
;
56 struct list_head readers
;
61 struct list_head logs
;
64 static LIST_HEAD(log_list
);
67 * struct logger_reader - a logging device open for reading
68 * @log: The associated log
69 * @list: The associated entry in @logger_log's list
70 * @r_off: The current read head offset.
71 * @r_all: Reader can read all entries
72 * @r_ver: Reader ABI version
74 * This object lives from open to release, so we don't need additional
75 * reference counting. The structure is protected by log->mutex.
77 struct logger_reader
{
78 struct logger_log
*log
;
79 struct list_head list
;
85 /* logger_offset - returns index 'n' into the log via (optimized) modulus */
86 static size_t logger_offset(struct logger_log
*log
, size_t n
)
88 return n
& (log
->size
- 1);
92 * file_get_log - Given a file structure, return the associated log
94 * This isn't aesthetic. We have several goals:
96 * 1) Need to quickly obtain the associated log during an I/O operation
97 * 2) Readers need to maintain state (logger_reader)
98 * 3) Writers need to be very fast (open() should be a near no-op)
100 * In the reader case, we can trivially go file->logger_reader->logger_log.
101 * For a writer, we don't want to maintain a logger_reader, so we just go
102 * file->logger_log. Thus what file->private_data points at depends on whether
103 * or not the file was opened for reading. This function hides that dirtiness.
105 static inline struct logger_log
*file_get_log(struct file
*file
)
107 if (file
->f_mode
& FMODE_READ
) {
108 struct logger_reader
*reader
= file
->private_data
;
112 return file
->private_data
;
116 * get_entry_header - returns a pointer to the logger_entry header within
117 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
118 * be provided. Typically the return value will be a pointer within
119 * 'logger->buf'. However, a pointer to 'scratch' may be returned if
120 * the log entry spans the end and beginning of the circular buffer.
122 static struct logger_entry
*get_entry_header(struct logger_log
*log
,
124 struct logger_entry
*scratch
)
126 size_t len
= min(sizeof(struct logger_entry
), log
->size
- off
);
128 if (len
!= sizeof(struct logger_entry
)) {
129 memcpy(((void *)scratch
), log
->buffer
+ off
, len
);
130 memcpy(((void *)scratch
) + len
, log
->buffer
,
131 sizeof(struct logger_entry
) - len
);
135 return (struct logger_entry
*) (log
->buffer
+ off
);
139 * get_entry_msg_len - Grabs the length of the message of the entry
140 * starting from from 'off'.
142 * An entry length is 2 bytes (16 bits) in host endian order.
143 * In the log, the length does not include the size of the log entry structure.
144 * This function returns the size including the log entry structure.
146 * Caller needs to hold log->mutex.
148 static __u32
get_entry_msg_len(struct logger_log
*log
, size_t off
)
150 struct logger_entry scratch
;
151 struct logger_entry
*entry
;
153 entry
= get_entry_header(log
, off
, &scratch
);
157 static size_t get_user_hdr_len(int ver
)
160 return sizeof(struct user_logger_entry_compat
);
161 return sizeof(struct logger_entry
);
164 static ssize_t
copy_header_to_user(int ver
, struct logger_entry
*entry
,
169 struct user_logger_entry_compat v1
;
177 v1
.nsec
= entry
->nsec
;
179 hdr_len
= sizeof(struct user_logger_entry_compat
);
182 hdr_len
= sizeof(struct logger_entry
);
185 return copy_to_user(buf
, hdr
, hdr_len
);
189 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
190 * user-space buffer 'buf'. Returns 'count' on success.
192 * Caller must hold log->mutex.
194 static ssize_t
do_read_log_to_user(struct logger_log
*log
,
195 struct logger_reader
*reader
,
199 struct logger_entry scratch
;
200 struct logger_entry
*entry
;
205 * First, copy the header to userspace, using the version of
206 * the header requested
208 entry
= get_entry_header(log
, reader
->r_off
, &scratch
);
209 if (copy_header_to_user(reader
->r_ver
, entry
, buf
))
212 count
-= get_user_hdr_len(reader
->r_ver
);
213 buf
+= get_user_hdr_len(reader
->r_ver
);
214 msg_start
= logger_offset(log
,
215 reader
->r_off
+ sizeof(struct logger_entry
));
218 * We read from the msg in two disjoint operations. First, we read from
219 * the current msg head offset up to 'count' bytes or to the end of
220 * the log, whichever comes first.
222 len
= min(count
, log
->size
- msg_start
);
223 if (copy_to_user(buf
, log
->buffer
+ msg_start
, len
))
227 * Second, we read any remaining bytes, starting back at the head of
231 if (copy_to_user(buf
+ len
, log
->buffer
, count
- len
))
234 reader
->r_off
= logger_offset(log
, reader
->r_off
+
235 sizeof(struct logger_entry
) + count
);
237 return count
+ get_user_hdr_len(reader
->r_ver
);
241 * get_next_entry_by_uid - Starting at 'off', returns an offset into
242 * 'log->buffer' which contains the first entry readable by 'euid'
244 static size_t get_next_entry_by_uid(struct logger_log
*log
,
245 size_t off
, kuid_t euid
)
247 while (off
!= log
->w_off
) {
248 struct logger_entry
*entry
;
249 struct logger_entry scratch
;
252 entry
= get_entry_header(log
, off
, &scratch
);
254 if (uid_eq(entry
->euid
, euid
))
257 next_len
= sizeof(struct logger_entry
) + entry
->len
;
258 off
= logger_offset(log
, off
+ next_len
);
265 * logger_read - our log's read() method
270 * - If there are no log entries to read, blocks until log is written to
271 * - Atomically reads exactly one log entry
273 * Will set errno to EINVAL if read
274 * buffer is insufficient to hold next entry.
276 static ssize_t
logger_read(struct file
*file
, char __user
*buf
,
277 size_t count
, loff_t
*pos
)
279 struct logger_reader
*reader
= file
->private_data
;
280 struct logger_log
*log
= reader
->log
;
286 mutex_lock(&log
->mutex
);
288 prepare_to_wait(&log
->wq
, &wait
, TASK_INTERRUPTIBLE
);
290 ret
= (log
->w_off
== reader
->r_off
);
291 mutex_unlock(&log
->mutex
);
295 if (file
->f_flags
& O_NONBLOCK
) {
300 if (signal_pending(current
)) {
308 finish_wait(&log
->wq
, &wait
);
312 mutex_lock(&log
->mutex
);
315 reader
->r_off
= get_next_entry_by_uid(log
,
316 reader
->r_off
, current_euid());
318 /* is there still something to read or did we race? */
319 if (unlikely(log
->w_off
== reader
->r_off
)) {
320 mutex_unlock(&log
->mutex
);
324 /* get the size of the next entry */
325 ret
= get_user_hdr_len(reader
->r_ver
) +
326 get_entry_msg_len(log
, reader
->r_off
);
332 /* get exactly one entry from the log */
333 ret
= do_read_log_to_user(log
, reader
, buf
, ret
);
336 mutex_unlock(&log
->mutex
);
342 * get_next_entry - return the offset of the first valid entry at least 'len'
345 * Caller must hold log->mutex.
347 static size_t get_next_entry(struct logger_log
*log
, size_t off
, size_t len
)
352 size_t nr
= sizeof(struct logger_entry
) +
353 get_entry_msg_len(log
, off
);
354 off
= logger_offset(log
, off
+ nr
);
356 } while (count
< len
);
/*
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 * positions in the buffer
 *
 * That is, if a<b, check for c between a and b
 * and if a>b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *  or                    c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
	if (a < b) {
		/* is c between a and b? */
		if (a < c && c <= b)
			return 1;
	} else {
		/* is c outside of b through a? */
		if (c <= b || a < c)
			return 1;
	}

	return 0;
}
391 * fix_up_readers - walk the list of all readers and "fix up" any who were
392 * lapped by the writer; also do the same for the default "start head".
393 * We do this by "pulling forward" the readers and start head to the first
394 * entry after the new write head.
396 * The caller needs to hold log->mutex.
398 static void fix_up_readers(struct logger_log
*log
, size_t len
)
400 size_t old
= log
->w_off
;
401 size_t new = logger_offset(log
, old
+ len
);
402 struct logger_reader
*reader
;
404 if (is_between(old
, new, log
->head
))
405 log
->head
= get_next_entry(log
, log
->head
, len
);
407 list_for_each_entry(reader
, &log
->readers
, list
)
408 if (is_between(old
, new, reader
->r_off
))
409 reader
->r_off
= get_next_entry(log
, reader
->r_off
, len
);
413 * logger_write_iter - our write method, implementing support for write(),
414 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
415 * them above all else.
417 static ssize_t
logger_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
419 struct logger_log
*log
= file_get_log(iocb
->ki_filp
);
420 struct logger_entry header
;
422 size_t len
, count
, w_off
;
424 count
= min_t(size_t, iocb
->ki_nbytes
, LOGGER_ENTRY_MAX_PAYLOAD
);
426 now
= current_kernel_time();
428 header
.pid
= current
->tgid
;
429 header
.tid
= current
->pid
;
430 header
.sec
= now
.tv_sec
;
431 header
.nsec
= now
.tv_nsec
;
432 header
.euid
= current_euid();
434 header
.hdr_size
= sizeof(struct logger_entry
);
436 /* null writes succeed, return zero */
437 if (unlikely(!header
.len
))
440 mutex_lock(&log
->mutex
);
443 * Fix up any readers, pulling them forward to the first readable
444 * entry after (what will be) the new write offset. We do this now
445 * because if we partially fail, we can end up with clobbered log
446 * entries that encroach on readable buffer.
448 fix_up_readers(log
, sizeof(struct logger_entry
) + header
.len
);
450 len
= min(sizeof(header
), log
->size
- log
->w_off
);
451 memcpy(log
->buffer
+ log
->w_off
, &header
, len
);
452 memcpy(log
->buffer
, (char *)&header
+ len
, sizeof(header
) - len
);
454 /* Work with a copy until we are ready to commit the whole entry */
455 w_off
= logger_offset(log
, log
->w_off
+ sizeof(struct logger_entry
));
457 len
= min(count
, log
->size
- w_off
);
459 if (copy_from_iter(log
->buffer
+ w_off
, len
, from
) != len
) {
461 * Note that by not updating log->w_off, this abandons the
462 * portion of the new entry that *was* successfully
463 * copied, just above. This is intentional to avoid
464 * message corruption from missing fragments.
466 mutex_unlock(&log
->mutex
);
470 if (copy_from_iter(log
->buffer
, count
- len
, from
) != count
- len
) {
471 mutex_unlock(&log
->mutex
);
475 log
->w_off
= logger_offset(log
, w_off
+ count
);
476 mutex_unlock(&log
->mutex
);
478 /* wake up any blocked readers */
479 wake_up_interruptible(&log
->wq
);
484 static struct logger_log
*get_log_from_minor(int minor
)
486 struct logger_log
*log
;
488 list_for_each_entry(log
, &log_list
, logs
)
489 if (log
->misc
.minor
== minor
)
495 * logger_open - the log's open() file operation
497 * Note how near a no-op this is in the write-only case. Keep it that way!
499 static int logger_open(struct inode
*inode
, struct file
*file
)
501 struct logger_log
*log
;
504 ret
= nonseekable_open(inode
, file
);
508 log
= get_log_from_minor(MINOR(inode
->i_rdev
));
512 if (file
->f_mode
& FMODE_READ
) {
513 struct logger_reader
*reader
;
515 reader
= kmalloc(sizeof(struct logger_reader
), GFP_KERNEL
);
521 reader
->r_all
= in_egroup_p(inode
->i_gid
) ||
524 INIT_LIST_HEAD(&reader
->list
);
526 mutex_lock(&log
->mutex
);
527 reader
->r_off
= log
->head
;
528 list_add_tail(&reader
->list
, &log
->readers
);
529 mutex_unlock(&log
->mutex
);
531 file
->private_data
= reader
;
533 file
->private_data
= log
;
540 * logger_release - the log's release file operation
542 * Note this is a total no-op in the write-only case. Keep it that way!
544 static int logger_release(struct inode
*ignored
, struct file
*file
)
546 if (file
->f_mode
& FMODE_READ
) {
547 struct logger_reader
*reader
= file
->private_data
;
548 struct logger_log
*log
= reader
->log
;
550 mutex_lock(&log
->mutex
);
551 list_del(&reader
->list
);
552 mutex_unlock(&log
->mutex
);
561 * logger_poll - the log's poll file operation, for poll/select/epoll
563 * Note we always return POLLOUT, because you can always write() to the log.
564 * Note also that, strictly speaking, a return value of POLLIN does not
565 * guarantee that the log is readable without blocking, as there is a small
566 * chance that the writer can lap the reader in the interim between poll()
567 * returning and the read() request.
569 static unsigned int logger_poll(struct file
*file
, poll_table
*wait
)
571 struct logger_reader
*reader
;
572 struct logger_log
*log
;
573 unsigned int ret
= POLLOUT
| POLLWRNORM
;
575 if (!(file
->f_mode
& FMODE_READ
))
578 reader
= file
->private_data
;
581 poll_wait(file
, &log
->wq
, wait
);
583 mutex_lock(&log
->mutex
);
585 reader
->r_off
= get_next_entry_by_uid(log
,
586 reader
->r_off
, current_euid());
588 if (log
->w_off
!= reader
->r_off
)
589 ret
|= POLLIN
| POLLRDNORM
;
590 mutex_unlock(&log
->mutex
);
595 static long logger_set_version(struct logger_reader
*reader
, void __user
*arg
)
599 if (copy_from_user(&version
, arg
, sizeof(int)))
602 if ((version
< 1) || (version
> 2))
605 reader
->r_ver
= version
;
609 static long logger_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
611 struct logger_log
*log
= file_get_log(file
);
612 struct logger_reader
*reader
;
614 void __user
*argp
= (void __user
*)arg
;
616 mutex_lock(&log
->mutex
);
619 case LOGGER_GET_LOG_BUF_SIZE
:
622 case LOGGER_GET_LOG_LEN
:
623 if (!(file
->f_mode
& FMODE_READ
)) {
627 reader
= file
->private_data
;
628 if (log
->w_off
>= reader
->r_off
)
629 ret
= log
->w_off
- reader
->r_off
;
631 ret
= (log
->size
- reader
->r_off
) + log
->w_off
;
633 case LOGGER_GET_NEXT_ENTRY_LEN
:
634 if (!(file
->f_mode
& FMODE_READ
)) {
638 reader
= file
->private_data
;
641 reader
->r_off
= get_next_entry_by_uid(log
,
642 reader
->r_off
, current_euid());
644 if (log
->w_off
!= reader
->r_off
)
645 ret
= get_user_hdr_len(reader
->r_ver
) +
646 get_entry_msg_len(log
, reader
->r_off
);
650 case LOGGER_FLUSH_LOG
:
651 if (!(file
->f_mode
& FMODE_WRITE
)) {
655 if (!(in_egroup_p(file_inode(file
)->i_gid
) ||
656 capable(CAP_SYSLOG
))) {
660 list_for_each_entry(reader
, &log
->readers
, list
)
661 reader
->r_off
= log
->w_off
;
662 log
->head
= log
->w_off
;
665 case LOGGER_GET_VERSION
:
666 if (!(file
->f_mode
& FMODE_READ
)) {
670 reader
= file
->private_data
;
673 case LOGGER_SET_VERSION
:
674 if (!(file
->f_mode
& FMODE_READ
)) {
678 reader
= file
->private_data
;
679 ret
= logger_set_version(reader
, argp
);
683 mutex_unlock(&log
->mutex
);
688 static const struct file_operations logger_fops
= {
689 .owner
= THIS_MODULE
,
691 .write_iter
= logger_write_iter
,
693 .unlocked_ioctl
= logger_ioctl
,
694 .compat_ioctl
= logger_ioctl
,
696 .release
= logger_release
,
700 * Log size must must be a power of two, and greater than
701 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
703 static int __init
create_log(char *log_name
, int size
)
706 struct logger_log
*log
;
707 unsigned char *buffer
;
709 buffer
= vmalloc(size
);
713 log
= kzalloc(sizeof(struct logger_log
), GFP_KERNEL
);
716 goto out_free_buffer
;
718 log
->buffer
= buffer
;
720 log
->misc
.minor
= MISC_DYNAMIC_MINOR
;
721 log
->misc
.name
= kstrdup(log_name
, GFP_KERNEL
);
722 if (log
->misc
.name
== NULL
) {
727 log
->misc
.fops
= &logger_fops
;
728 log
->misc
.parent
= NULL
;
730 init_waitqueue_head(&log
->wq
);
731 INIT_LIST_HEAD(&log
->readers
);
732 mutex_init(&log
->mutex
);
737 INIT_LIST_HEAD(&log
->logs
);
738 list_add_tail(&log
->logs
, &log_list
);
740 /* finally, initialize the misc device for this log */
741 ret
= misc_register(&log
->misc
);
743 pr_err("failed to register misc device for log '%s'!\n",
745 goto out_free_misc_name
;
748 pr_info("created %luK log '%s'\n",
749 (unsigned long)log
->size
>> 10, log
->misc
.name
);
754 kfree(log
->misc
.name
);
764 static int __init
logger_init(void)
768 ret
= create_log(LOGGER_LOG_MAIN
, 256*1024);
772 ret
= create_log(LOGGER_LOG_EVENTS
, 256*1024);
776 ret
= create_log(LOGGER_LOG_RADIO
, 256*1024);
780 ret
= create_log(LOGGER_LOG_SYSTEM
, 256*1024);
788 static void __exit
logger_exit(void)
790 struct logger_log
*current_log
, *next_log
;
792 list_for_each_entry_safe(current_log
, next_log
, &log_list
, logs
) {
793 /* we have to delete all the entry inside log_list */
794 misc_deregister(¤t_log
->misc
);
795 vfree(current_log
->buffer
);
796 kfree(current_log
->misc
.name
);
797 list_del(¤t_log
->logs
);
/* Built-in init (no module_init so logs exist early); exit only matters
 * when built as a module. */
device_initcall(logger_init);
module_exit(logger_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Love, <rlove@google.com>");
MODULE_DESCRIPTION("Android Logger");