1 /*
2 * drivers/staging/android/logger.c
3 *
4 * A Logging Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 *
8 * Robert Love <rlove@google.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #define pr_fmt(fmt) "logger: " fmt
21
22 #include <linux/sched.h>
23 #include <linux/module.h>
24 #include <linux/fs.h>
25 #include <linux/miscdevice.h>
26 #include <linux/uaccess.h>
27 #include <linux/poll.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/vmalloc.h>
31 #include <linux/aio.h>
32 #include "logger.h"
33
34 #include <asm/ioctls.h>
35
36 /**
37 * struct logger_log - represents a specific log, such as 'main' or 'radio'
38 * @buffer: The actual ring buffer
39 * @misc: The "misc" device representing the log
40 * @wq: The wait queue for @readers
41 * @readers: This log's readers
42 * @mutex: The mutex that protects the @buffer
43 * @w_off: The current write head offset
44 * @head: The head, or location that readers start reading at.
45 * @size: The size of the log
46 * @logs: This log's entry in the global list of logs
47 *
48 * This structure lives from module insertion until module removal, so it does
49 * not need additional reference counting. The structure is protected by the
50 * mutex 'mutex'.
51 */
52 struct logger_log {
53 unsigned char *buffer;
54 struct miscdevice misc;
55 wait_queue_head_t wq;
56 struct list_head readers;
57 struct mutex mutex;
58 size_t w_off;
59 size_t head;
60 size_t size;
61 struct list_head logs;
62 };
63
64 static LIST_HEAD(log_list);
65
66 /**
67 * struct logger_reader - a logging device open for reading
68 * @log: The associated log
69 * @list: The associated entry in @logger_log's list
70 * @r_off: The current read head offset.
71 * @r_all: Reader can read all entries
72 * @r_ver: Reader ABI version
73 *
74 * This object lives from open to release, so we don't need additional
75 * reference counting. The structure is protected by log->mutex.
76 */
77 struct logger_reader {
78 struct logger_log *log;
79 struct list_head list;
80 size_t r_off;
81 bool r_all;
82 int r_ver;
83 };
84
85 /* logger_offset - returns index 'n' into the log via (optimized) modulus */
86 static size_t logger_offset(struct logger_log *log, size_t n)
87 {
88 return n & (log->size - 1);
89 }
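
/*
 * Editor's note (illustrative, not part of the original driver): every log
 * created in logger_init() has a power-of-two size, so the bitwise AND above
 * is an exact replacement for a modulus.  For example, with
 * log->size == 256*1024 (0x40000), an offset of 0x40006 wraps to
 *
 *     0x40006 & (0x40000 - 1) == 0x40006 & 0x3ffff == 0x6
 *
 * which is the same result as 0x40006 % 0x40000, but without a division.
 */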
90
91 /*
92 * file_get_log - Given a file structure, return the associated log
93 *
94 * This isn't pretty, but we have several goals:
95 *
96 * 1) Need to quickly obtain the associated log during an I/O operation
97 * 2) Readers need to maintain state (logger_reader)
98 * 3) Writers need to be very fast (open() should be a near no-op)
99 *
100 * In the reader case, we can trivially go file->logger_reader->logger_log.
101 * For a writer, we don't want to maintain a logger_reader, so we just go
102 * file->logger_log. Thus what file->private_data points at depends on whether
103 * or not the file was opened for reading. This function hides that dirtiness.
104 */
105 static inline struct logger_log *file_get_log(struct file *file)
106 {
107 if (file->f_mode & FMODE_READ) {
108 struct logger_reader *reader = file->private_data;
109
110 return reader->log;
111 }
112 return file->private_data;
113 }
114
115 /*
116 * get_entry_header - returns a pointer to the logger_entry header within
117 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
118 * be provided. Typically the return value will be a pointer within
119 * 'log->buffer'. However, a pointer to 'scratch' may be returned if
120 * the log entry spans the end and beginning of the circular buffer.
121 */
122 static struct logger_entry *get_entry_header(struct logger_log *log,
123 size_t off,
124 struct logger_entry *scratch)
125 {
126 size_t len = min(sizeof(struct logger_entry), log->size - off);
127
128 if (len != sizeof(struct logger_entry)) {
129 memcpy(((void *)scratch), log->buffer + off, len);
130 memcpy(((void *)scratch) + len, log->buffer,
131 sizeof(struct logger_entry) - len);
132 return scratch;
133 }
134
135 return (struct logger_entry *) (log->buffer + off);
136 }
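
/*
 * Editor's example (illustrative, not from the original source): with
 * log->size == 256*1024 and off == log->size - 8, only the first 8 bytes of
 * the header fit before the end of the buffer, so len != sizeof(struct
 * logger_entry).  The two memcpy() calls above then stitch the header back
 * together in 'scratch': 8 bytes from the tail of log->buffer plus the
 * remaining sizeof(struct logger_entry) - 8 bytes that wrapped around to
 * log->buffer[0], and 'scratch' is returned instead of a pointer into the
 * ring buffer.
 */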
137
138 /*
139 * get_entry_msg_len - Grabs the length of the message of the entry
140 * starting from 'off'.
141 *
142 * An entry length is 2 bytes (16 bits) in host endian order.
143 * In the log, the length does not include the size of the log entry structure.
144 * This function returns that length, i.e. without the entry structure.
145 *
146 * Caller needs to hold log->mutex.
147 */
148 static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
149 {
150 struct logger_entry scratch;
151 struct logger_entry *entry;
152
153 entry = get_entry_header(log, off, &scratch);
154 return entry->len;
155 }
156
157 static size_t get_user_hdr_len(int ver)
158 {
159 if (ver < 2)
160 return sizeof(struct user_logger_entry_compat);
161 return sizeof(struct logger_entry);
162 }
163
164 static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
165 char __user *buf)
166 {
167 void *hdr;
168 size_t hdr_len;
169 struct user_logger_entry_compat v1;
170
171 if (ver < 2) {
172 v1.len = entry->len;
173 v1.__pad = 0;
174 v1.pid = entry->pid;
175 v1.tid = entry->tid;
176 v1.sec = entry->sec;
177 v1.nsec = entry->nsec;
178 hdr = &v1;
179 hdr_len = sizeof(struct user_logger_entry_compat);
180 } else {
181 hdr = entry;
182 hdr_len = sizeof(struct logger_entry);
183 }
184
185 return copy_to_user(buf, hdr, hdr_len);
186 }
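
/*
 * Note added for clarity (not in the original source): the v1 format,
 * struct user_logger_entry_compat, has no euid or hdr_size fields, so
 * copy_header_to_user() rebuilds a v1 header field by field, while v2
 * readers receive struct logger_entry verbatim.  This is why readers
 * negotiate the ABI version through LOGGER_GET_VERSION/LOGGER_SET_VERSION
 * (see logger_ioctl() below) before parsing entries.
 */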
187
188 /*
189 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
190 * user-space buffer 'buf'. Returns 'count' on success.
191 *
192 * Caller must hold log->mutex.
193 */
194 static ssize_t do_read_log_to_user(struct logger_log *log,
195 struct logger_reader *reader,
196 char __user *buf,
197 size_t count)
198 {
199 struct logger_entry scratch;
200 struct logger_entry *entry;
201 size_t len;
202 size_t msg_start;
203
204 /*
205 * First, copy the header to userspace, using the version of
206 * the header requested
207 */
208 entry = get_entry_header(log, reader->r_off, &scratch);
209 if (copy_header_to_user(reader->r_ver, entry, buf))
210 return -EFAULT;
211
212 count -= get_user_hdr_len(reader->r_ver);
213 buf += get_user_hdr_len(reader->r_ver);
214 msg_start = logger_offset(log,
215 reader->r_off + sizeof(struct logger_entry));
216
217 /*
218 * We read from the msg in two disjoint operations. First, we read from
219 * the current msg head offset up to 'count' bytes or to the end of
220 * the log, whichever comes first.
221 */
222 len = min(count, log->size - msg_start);
223 if (copy_to_user(buf, log->buffer + msg_start, len))
224 return -EFAULT;
225
226 /*
227 * Second, we read any remaining bytes, starting back at the head of
228 * the log.
229 */
230 if (count != len)
231 if (copy_to_user(buf + len, log->buffer, count - len))
232 return -EFAULT;
233
234 reader->r_off = logger_offset(log, reader->r_off +
235 sizeof(struct logger_entry) + count);
236
237 return count + get_user_hdr_len(reader->r_ver);
238 }
239
240 /*
241 * get_next_entry_by_uid - Starting at 'off', returns an offset into
242 * 'log->buffer' which contains the first entry readable by 'euid'
243 */
244 static size_t get_next_entry_by_uid(struct logger_log *log,
245 size_t off, kuid_t euid)
246 {
247 while (off != log->w_off) {
248 struct logger_entry *entry;
249 struct logger_entry scratch;
250 size_t next_len;
251
252 entry = get_entry_header(log, off, &scratch);
253
254 if (uid_eq(entry->euid, euid))
255 return off;
256
257 next_len = sizeof(struct logger_entry) + entry->len;
258 off = logger_offset(log, off + next_len);
259 }
260
261 return off;
262 }
263
264 /*
265 * logger_read - our log's read() method
266 *
267 * Behavior:
268 *
269 * - O_NONBLOCK works
270 * - If there are no log entries to read, blocks until log is written to
271 * - Atomically reads exactly one log entry
272 *
273 * Will set errno to EINVAL if the read buffer is insufficient to hold the
274 * next entry.
275 */
276 static ssize_t logger_read(struct file *file, char __user *buf,
277 size_t count, loff_t *pos)
278 {
279 struct logger_reader *reader = file->private_data;
280 struct logger_log *log = reader->log;
281 ssize_t ret;
282 DEFINE_WAIT(wait);
283
284 start:
285 while (1) {
286 mutex_lock(&log->mutex);
287
288 prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
289
290 ret = (log->w_off == reader->r_off);
291 mutex_unlock(&log->mutex);
292 if (!ret)
293 break;
294
295 if (file->f_flags & O_NONBLOCK) {
296 ret = -EAGAIN;
297 break;
298 }
299
300 if (signal_pending(current)) {
301 ret = -EINTR;
302 break;
303 }
304
305 schedule();
306 }
307
308 finish_wait(&log->wq, &wait);
309 if (ret)
310 return ret;
311
312 mutex_lock(&log->mutex);
313
314 if (!reader->r_all)
315 reader->r_off = get_next_entry_by_uid(log,
316 reader->r_off, current_euid());
317
318 /* is there still something to read or did we race? */
319 if (unlikely(log->w_off == reader->r_off)) {
320 mutex_unlock(&log->mutex);
321 goto start;
322 }
323
324 /* get the size of the next entry */
325 ret = get_user_hdr_len(reader->r_ver) +
326 get_entry_msg_len(log, reader->r_off);
327 if (count < ret) {
328 ret = -EINVAL;
329 goto out;
330 }
331
332 /* get exactly one entry from the log */
333 ret = do_read_log_to_user(log, reader, buf, ret);
334
335 out:
336 mutex_unlock(&log->mutex);
337
338 return ret;
339 }
340
341 /*
342 * get_next_entry - return the offset of the first valid entry at least 'len'
343 * bytes after 'off'.
344 *
345 * Caller must hold log->mutex.
346 */
347 static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
348 {
349 size_t count = 0;
350
351 do {
352 size_t nr = sizeof(struct logger_entry) +
353 get_entry_msg_len(log, off);
354 off = logger_offset(log, off + nr);
355 count += nr;
356 } while (count < len);
357
358 return off;
359 }
360
361 /*
362 * is_between - is a < c <= b, accounting for wrapping of a, b, and c
363 * positions in the buffer
364 *
365 * That is, if a<b, check for c between a and b
366 * and if a>b, check for c outside (not between) a and b
367 *
368 * |------- a xxxxxxxx b --------|
369 * c^
370 *
371 * |xxxxx b --------- a xxxxxxxxx|
372 * c^
373 * or c^
374 */
375 static inline int is_between(size_t a, size_t b, size_t c)
376 {
377 if (a < b) {
378 /* is c between a and b? */
379 if (a < c && c <= b)
380 return 1;
381 } else {
382 /* is c outside of b through a? */
383 if (c <= b || a < c)
384 return 1;
385 }
386
387 return 0;
388 }
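
/*
 * Worked example (editor's illustration): take a == 100 and b == 40, i.e.
 * the region has wrapped past the end of the buffer.  is_between(100, 40, 20)
 * returns 1 because 20 <= b, and is_between(100, 40, 120) returns 1 because
 * a < 120; an offset such as 70, which lies in the untouched gap between
 * b and a, returns 0.
 */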
389
390 /*
391 * fix_up_readers - walk the list of all readers and "fix up" any who were
392 * lapped by the writer; also do the same for the default "start head".
393 * We do this by "pulling forward" the readers and start head to the first
394 * entry after the new write head.
395 *
396 * The caller needs to hold log->mutex.
397 */
398 static void fix_up_readers(struct logger_log *log, size_t len)
399 {
400 size_t old = log->w_off;
401 size_t new = logger_offset(log, old + len);
402 struct logger_reader *reader;
403
404 if (is_between(old, new, log->head))
405 log->head = get_next_entry(log, log->head, len);
406
407 list_for_each_entry(reader, &log->readers, list)
408 if (is_between(old, new, reader->r_off))
409 reader->r_off = get_next_entry(log, reader->r_off, len);
410 }
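
/*
 * Example (editor's illustration, offsets chosen arbitrarily): suppose the
 * writer is about to append len == 300 bytes at w_off == 1000, advancing the
 * write head to 1300 and overwriting offsets 1000..1299.  A reader parked at
 * r_off == 1100 falls inside that range, so it is pulled forward with
 * get_next_entry(), which walks whole entries starting at 1100 until at
 * least 300 bytes have been skipped, leaving the reader on the first entry
 * boundary beyond the clobbered region.  Readers outside the range are left
 * untouched.
 */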
411
412 /*
413 * logger_write_iter - our write method, implementing support for write(),
414 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
415 * them above all else.
416 */
417 static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
418 {
419 struct logger_log *log = file_get_log(iocb->ki_filp);
420 struct logger_entry header;
421 struct timespec now;
422 size_t len, count, w_off;
423
424 count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
425
426 now = current_kernel_time();
427
428 header.pid = current->tgid;
429 header.tid = current->pid;
430 header.sec = now.tv_sec;
431 header.nsec = now.tv_nsec;
432 header.euid = current_euid();
433 header.len = count;
434 header.hdr_size = sizeof(struct logger_entry);
435
436 /* null writes succeed, return zero */
437 if (unlikely(!header.len))
438 return 0;
439
440 mutex_lock(&log->mutex);
441
442 /*
443 * Fix up any readers, pulling them forward to the first readable
444 * entry after (what will be) the new write offset. We do this now
445 * because if we partially fail, we can end up with clobbered log
446 * entries that encroach on readable buffer.
447 */
448 fix_up_readers(log, sizeof(struct logger_entry) + header.len);
449
450 len = min(sizeof(header), log->size - log->w_off);
451 memcpy(log->buffer + log->w_off, &header, len);
452 memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);
453
454 /* Work with a copy until we are ready to commit the whole entry */
455 w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry));
456
457 len = min(count, log->size - w_off);
458
459 if (copy_from_iter(log->buffer + w_off, len, from) != len) {
460 /*
461 * Note that by not updating log->w_off, this abandons the
462 * portion of the new entry that *was* successfully
463 * copied, just above. This is intentional to avoid
464 * message corruption from missing fragments.
465 */
466 mutex_unlock(&log->mutex);
467 return -EFAULT;
468 }
469
470 if (copy_from_iter(log->buffer, count - len, from) != count - len) {
471 mutex_unlock(&log->mutex);
472 return -EFAULT;
473 }
474
475 log->w_off = logger_offset(log, w_off + count);
476 mutex_unlock(&log->mutex);
477
478 /* wake up any blocked readers */
479 wake_up_interruptible(&log->wq);
480
481 return count;
482 }
483
484 static struct logger_log *get_log_from_minor(int minor)
485 {
486 struct logger_log *log;
487
488 list_for_each_entry(log, &log_list, logs)
489 if (log->misc.minor == minor)
490 return log;
491 return NULL;
492 }
493
494 /*
495 * logger_open - the log's open() file operation
496 *
497 * Note how near a no-op this is in the write-only case. Keep it that way!
498 */
499 static int logger_open(struct inode *inode, struct file *file)
500 {
501 struct logger_log *log;
502 int ret;
503
504 ret = nonseekable_open(inode, file);
505 if (ret)
506 return ret;
507
508 log = get_log_from_minor(MINOR(inode->i_rdev));
509 if (!log)
510 return -ENODEV;
511
512 if (file->f_mode & FMODE_READ) {
513 struct logger_reader *reader;
514
515 reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
516 if (!reader)
517 return -ENOMEM;
518
519 reader->log = log;
520 reader->r_ver = 1;
521 reader->r_all = in_egroup_p(inode->i_gid) ||
522 capable(CAP_SYSLOG);
523
524 INIT_LIST_HEAD(&reader->list);
525
526 mutex_lock(&log->mutex);
527 reader->r_off = log->head;
528 list_add_tail(&reader->list, &log->readers);
529 mutex_unlock(&log->mutex);
530
531 file->private_data = reader;
532 } else {
533 file->private_data = log;
534 }
535
536 return 0;
537 }
538
539 /*
540 * logger_release - the log's release file operation
541 *
542 * Note this is a total no-op in the write-only case. Keep it that way!
543 */
544 static int logger_release(struct inode *ignored, struct file *file)
545 {
546 if (file->f_mode & FMODE_READ) {
547 struct logger_reader *reader = file->private_data;
548 struct logger_log *log = reader->log;
549
550 mutex_lock(&log->mutex);
551 list_del(&reader->list);
552 mutex_unlock(&log->mutex);
553
554 kfree(reader);
555 }
556
557 return 0;
558 }
559
560 /*
561 * logger_poll - the log's poll file operation, for poll/select/epoll
562 *
563 * Note we always return POLLOUT, because you can always write() to the log.
564 * Note also that, strictly speaking, a return value of POLLIN does not
565 * guarantee that the log is readable without blocking, as there is a small
566 * chance that the writer can lap the reader in the interim between poll()
567 * returning and the read() request.
568 */
569 static unsigned int logger_poll(struct file *file, poll_table *wait)
570 {
571 struct logger_reader *reader;
572 struct logger_log *log;
573 unsigned int ret = POLLOUT | POLLWRNORM;
574
575 if (!(file->f_mode & FMODE_READ))
576 return ret;
577
578 reader = file->private_data;
579 log = reader->log;
580
581 poll_wait(file, &log->wq, wait);
582
583 mutex_lock(&log->mutex);
584 if (!reader->r_all)
585 reader->r_off = get_next_entry_by_uid(log,
586 reader->r_off, current_euid());
587
588 if (log->w_off != reader->r_off)
589 ret |= POLLIN | POLLRDNORM;
590 mutex_unlock(&log->mutex);
591
592 return ret;
593 }
594
595 static long logger_set_version(struct logger_reader *reader, void __user *arg)
596 {
597 int version;
598
599 if (copy_from_user(&version, arg, sizeof(int)))
600 return -EFAULT;
601
602 if ((version < 1) || (version > 2))
603 return -EINVAL;
604
605 reader->r_ver = version;
606 return 0;
607 }
608
609 static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
610 {
611 struct logger_log *log = file_get_log(file);
612 struct logger_reader *reader;
613 long ret = -EINVAL;
614 void __user *argp = (void __user *)arg;
615
616 mutex_lock(&log->mutex);
617
618 switch (cmd) {
619 case LOGGER_GET_LOG_BUF_SIZE:
620 ret = log->size;
621 break;
622 case LOGGER_GET_LOG_LEN:
623 if (!(file->f_mode & FMODE_READ)) {
624 ret = -EBADF;
625 break;
626 }
627 reader = file->private_data;
628 if (log->w_off >= reader->r_off)
629 ret = log->w_off - reader->r_off;
630 else
631 ret = (log->size - reader->r_off) + log->w_off;
632 break;
633 case LOGGER_GET_NEXT_ENTRY_LEN:
634 if (!(file->f_mode & FMODE_READ)) {
635 ret = -EBADF;
636 break;
637 }
638 reader = file->private_data;
639
640 if (!reader->r_all)
641 reader->r_off = get_next_entry_by_uid(log,
642 reader->r_off, current_euid());
643
644 if (log->w_off != reader->r_off)
645 ret = get_user_hdr_len(reader->r_ver) +
646 get_entry_msg_len(log, reader->r_off);
647 else
648 ret = 0;
649 break;
650 case LOGGER_FLUSH_LOG:
651 if (!(file->f_mode & FMODE_WRITE)) {
652 ret = -EBADF;
653 break;
654 }
655 if (!(in_egroup_p(file_inode(file)->i_gid) ||
656 capable(CAP_SYSLOG))) {
657 ret = -EPERM;
658 break;
659 }
660 list_for_each_entry(reader, &log->readers, list)
661 reader->r_off = log->w_off;
662 log->head = log->w_off;
663 ret = 0;
664 break;
665 case LOGGER_GET_VERSION:
666 if (!(file->f_mode & FMODE_READ)) {
667 ret = -EBADF;
668 break;
669 }
670 reader = file->private_data;
671 ret = reader->r_ver;
672 break;
673 case LOGGER_SET_VERSION:
674 if (!(file->f_mode & FMODE_READ)) {
675 ret = -EBADF;
676 break;
677 }
678 reader = file->private_data;
679 ret = logger_set_version(reader, argp);
680 break;
681 }
682
683 mutex_unlock(&log->mutex);
684
685 return ret;
686 }
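
/*
 * Userspace usage sketch (editor's illustration; the device path, the header
 * providing the LOGGER_* ioctl numbers and the error handling are
 * assumptions, not part of this driver):
 *
 *     #include <fcntl.h>
 *     #include <sys/ioctl.h>
 *     #include <unistd.h>
 *
 *     int fd = open("/dev/log_main", O_RDONLY);  // node name depends on udev
 *     if (fd >= 0) {
 *             int buf_size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);
 *             int unread   = ioctl(fd, LOGGER_GET_LOG_LEN);
 *             int ver      = 2;
 *             ioctl(fd, LOGGER_SET_VERSION, &ver);  // request v2 headers
 *             close(fd);
 *     }
 *
 * LOGGER_FLUSH_LOG, by contrast, requires a descriptor opened for writing
 * and a caller in the log's group or with CAP_SYSLOG, as checked above.
 */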
687
688 static const struct file_operations logger_fops = {
689 .owner = THIS_MODULE,
690 .read = logger_read,
691 .write_iter = logger_write_iter,
692 .poll = logger_poll,
693 .unlocked_ioctl = logger_ioctl,
694 .compat_ioctl = logger_ioctl,
695 .open = logger_open,
696 .release = logger_release,
697 };
698
699 /*
700 * Log size must be a power of two, and greater than
701 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
702 */
703 static int __init create_log(char *log_name, int size)
704 {
705 int ret = 0;
706 struct logger_log *log;
707 unsigned char *buffer;
708
709 buffer = vmalloc(size);
710 if (buffer == NULL)
711 return -ENOMEM;
712
713 log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
714 if (log == NULL) {
715 ret = -ENOMEM;
716 goto out_free_buffer;
717 }
718 log->buffer = buffer;
719
720 log->misc.minor = MISC_DYNAMIC_MINOR;
721 log->misc.name = kstrdup(log_name, GFP_KERNEL);
722 if (log->misc.name == NULL) {
723 ret = -ENOMEM;
724 goto out_free_log;
725 }
726
727 log->misc.fops = &logger_fops;
728 log->misc.parent = NULL;
729
730 init_waitqueue_head(&log->wq);
731 INIT_LIST_HEAD(&log->readers);
732 mutex_init(&log->mutex);
733 log->w_off = 0;
734 log->head = 0;
735 log->size = size;
736
737 INIT_LIST_HEAD(&log->logs);
738 list_add_tail(&log->logs, &log_list);
739
740 /* finally, initialize the misc device for this log */
741 ret = misc_register(&log->misc);
742 if (unlikely(ret)) {
743 pr_err("failed to register misc device for log '%s'!\n",
744 log->misc.name);
745 goto out_free_misc_name;
746 }
747
748 pr_info("created %luK log '%s'\n",
749 (unsigned long)log->size >> 10, log->misc.name);
750
751 return 0;
752
753 out_free_misc_name:
754 kfree(log->misc.name);
755
756 out_free_log:
757 kfree(log);
758
759 out_free_buffer:
760 vfree(buffer);
761 return ret;
762 }
763
764 static int __init logger_init(void)
765 {
766 int ret;
767
768 ret = create_log(LOGGER_LOG_MAIN, 256*1024);
769 if (unlikely(ret))
770 goto out;
771
772 ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
773 if (unlikely(ret))
774 goto out;
775
776 ret = create_log(LOGGER_LOG_RADIO, 256*1024);
777 if (unlikely(ret))
778 goto out;
779
780 ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
781 if (unlikely(ret))
782 goto out;
783
784 out:
785 return ret;
786 }
787
788 static void __exit logger_exit(void)
789 {
790 struct logger_log *current_log, *next_log;
791
792 list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
793 /* we have to delete all the entries inside log_list */
794 misc_deregister(&current_log->misc);
795 vfree(current_log->buffer);
796 kfree(current_log->misc.name);
797 list_del(&current_log->logs);
798 kfree(current_log);
799 }
800 }
801
802 device_initcall(logger_init);
803 module_exit(logger_exit);
804
805 MODULE_LICENSE("GPL");
806 MODULE_AUTHOR("Robert Love, <rlove@google.com>");
807 MODULE_DESCRIPTION("Android Logger");