drivers/input/evdev.c
/*
 * Event char devices, giving access to raw input device events.
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define EVDEV_MINOR_BASE	64
#define EVDEV_MINORS		32
#define EVDEV_MIN_BUFFER_SIZE	64U
#define EVDEV_BUF_PACKETS	8

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include "input-compat.h"

struct evdev {
	int open;
	struct input_handle handle;
	wait_queue_head_t wait;
	struct evdev_client __rcu *grab;
	struct list_head client_list;
	spinlock_t client_lock; /* protects client_list */
	struct mutex mutex;
	struct device dev;
	struct cdev cdev;
	bool exist;
};

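/*
 * A per-open-file client.  Events are kept in a power-of-two ring buffer:
 * @head is where the next event coming from the device is stored, @tail is
 * the next event to hand out to the reader, and @packet_head trails @head
 * at the last complete packet boundary (just past the most recent
 * EV_SYN/SYN_REPORT), so readers and poll() only ever see whole packets.
 */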
struct evdev_client {
	unsigned int head;
	unsigned int tail;
	unsigned int packet_head; /* position of the first element of next packet */
	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
	struct fasync_struct *fasync;
	struct evdev *evdev;
	struct list_head node;
	int clkid;
	bool revoked;
	unsigned int bufsize;
	struct input_event buffer[];
};

/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
	unsigned int i, head, num;
	unsigned int mask = client->bufsize - 1;
	bool is_report;
	struct input_event *ev;

	BUG_ON(type == EV_SYN);

	head = client->tail;
	client->packet_head = client->tail;

	/* init to 1 so a leading SYN_REPORT will not be dropped */
	num = 1;

	for (i = client->tail; i != client->head; i = (i + 1) & mask) {
		ev = &client->buffer[i];
		is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;

		if (ev->type == type) {
			/* drop matched entry */
			continue;
		} else if (is_report && !num) {
			/* drop empty SYN_REPORT groups */
			continue;
		} else if (head != i) {
			/* move entry to fill the gap */
			client->buffer[head].time = ev->time;
			client->buffer[head].type = ev->type;
			client->buffer[head].code = ev->code;
			client->buffer[head].value = ev->value;
		}

		num++;
		head = (head + 1) & mask;

		if (is_report) {
			num = 0;
			client->packet_head = head;
		}
	}

	client->head = head;
}

/* queue SYN_DROPPED event */
static void evdev_queue_syn_dropped(struct evdev_client *client)
{
	unsigned long flags;
	struct input_event ev;
	ktime_t time;

	time = (client->clkid == CLOCK_MONOTONIC) ?
		ktime_get() : ktime_get_real();

	ev.time = ktime_to_timeval(time);
	ev.type = EV_SYN;
	ev.code = SYN_DROPPED;
	ev.value = 0;

	spin_lock_irqsave(&client->buffer_lock, flags);

	client->buffer[client->head++] = ev;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/* drop queue but keep our SYN_DROPPED event */
		client->tail = (client->head - 1) & (client->bufsize - 1);
		client->packet_head = client->tail;
	}

	spin_unlock_irqrestore(&client->buffer_lock, flags);
}

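/*
 * Store one event in the client's ring buffer.  The caller must hold
 * client->buffer_lock.  On overflow the queue is collapsed to a single
 * EV_SYN/SYN_DROPPED marker followed by the newest event.
 */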
static void __pass_event(struct evdev_client *client,
			 const struct input_event *event)
{
	client->buffer[client->head++] = *event;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/*
		 * This effectively "drops" all unconsumed events, leaving
		 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
		 */
		client->tail = (client->head - 2) & (client->bufsize - 1);

		client->buffer[client->tail].time = event->time;
		client->buffer[client->tail].type = EV_SYN;
		client->buffer[client->tail].code = SYN_DROPPED;
		client->buffer[client->tail].value = 0;

		client->packet_head = client->tail;
	}

	if (event->type == EV_SYN && event->code == SYN_REPORT) {
		client->packet_head = client->head;
		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
}

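/*
 * Deliver a batch of values to one client.  Timestamps come from the
 * monotonic/realtime pair captured once per batch in evdev_events(); each
 * client gets the clock it selected with EVIOCSCLOCKID (CLOCK_REALTIME by
 * default, since a freshly allocated client has clkid == 0).
 */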
static void evdev_pass_values(struct evdev_client *client,
			const struct input_value *vals, unsigned int count,
			ktime_t mono, ktime_t real)
{
	struct evdev *evdev = client->evdev;
	const struct input_value *v;
	struct input_event event;
	bool wakeup = false;

	if (client->revoked)
		return;

	event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
				      mono : real);

	/* Interrupts are disabled, just acquire the lock. */
	spin_lock(&client->buffer_lock);

	for (v = vals; v != vals + count; v++) {
		event.type = v->type;
		event.code = v->code;
		event.value = v->value;
		__pass_event(client, &event);
		if (v->type == EV_SYN && v->code == SYN_REPORT)
			wakeup = true;
	}

	spin_unlock(&client->buffer_lock);

	if (wakeup)
		wake_up_interruptible(&evdev->wait);
}

/*
 * Pass incoming events to all connected clients.
 */
static void evdev_events(struct input_handle *handle,
			 const struct input_value *vals, unsigned int count)
{
	struct evdev *evdev = handle->private;
	struct evdev_client *client;
	ktime_t time_mono, time_real;

	time_mono = ktime_get();
	time_real = ktime_mono_to_real(time_mono);

	rcu_read_lock();

	client = rcu_dereference(evdev->grab);

	if (client)
		evdev_pass_values(client, vals, count, time_mono, time_real);
	else
		list_for_each_entry_rcu(client, &evdev->client_list, node)
			evdev_pass_values(client, vals, count,
					  time_mono, time_real);

	rcu_read_unlock();
}

/*
 * Pass incoming event to all connected clients.
 */
static void evdev_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_value vals[] = { { type, code, value } };

	evdev_events(handle, vals, 1);
}

static int evdev_fasync(int fd, struct file *file, int on)
{
	struct evdev_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static int evdev_flush(struct file *file, fl_owner_t id)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked)
		retval = -ENODEV;
	else
		retval = input_flush_device(&evdev->handle, file);

	mutex_unlock(&evdev->mutex);
	return retval;
}

static void evdev_free(struct device *dev)
{
	struct evdev *evdev = container_of(dev, struct evdev, dev);

	input_put_device(evdev->handle.dev);
	kfree(evdev);
}

/*
 * Grabs an event device (along with the underlying input device).
 * This function is called with evdev->mutex taken.
 */
static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
{
	int error;

	if (evdev->grab)
		return -EBUSY;

	error = input_grab_device(&evdev->handle);
	if (error)
		return error;

	rcu_assign_pointer(evdev->grab, client);

	return 0;
}

static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
	struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
					lockdep_is_held(&evdev->mutex));

	if (grab != client)
		return -EINVAL;

	rcu_assign_pointer(evdev->grab, NULL);
	synchronize_rcu();
	input_release_device(&evdev->handle);

	return 0;
}

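/*
 * Clients are added to and removed from client_list under client_lock, but
 * evdev_events() walks the list under RCU only, so removal must wait for a
 * grace period before the caller may free or reuse the client.
 */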
static void evdev_attach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_add_tail_rcu(&client->node, &evdev->client_list);
	spin_unlock(&evdev->client_lock);
}

static void evdev_detach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&evdev->client_lock);
	synchronize_rcu();
}

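/*
 * evdev->open counts userspace opens of this evdev node; the underlying
 * input device is opened on the first open and closed again on the last
 * close, both under evdev->mutex.
 */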
static int evdev_open_device(struct evdev *evdev)
{
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist)
		retval = -ENODEV;
	else if (!evdev->open++) {
		retval = input_open_device(&evdev->handle);
		if (retval)
			evdev->open--;
	}

	mutex_unlock(&evdev->mutex);
	return retval;
}

static void evdev_close_device(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);

	if (evdev->exist && !--evdev->open)
		input_close_device(&evdev->handle);

	mutex_unlock(&evdev->mutex);
}

/*
 * Wake up users waiting for IO so they can disconnect from
 * the dead device.
 */
static void evdev_hangup(struct evdev *evdev)
{
	struct evdev_client *client;

	spin_lock(&evdev->client_lock);
	list_for_each_entry(client, &evdev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&evdev->client_lock);

	wake_up_interruptible(&evdev->wait);
}

static int evdev_release(struct inode *inode, struct file *file)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;

	mutex_lock(&evdev->mutex);
	evdev_ungrab(evdev, client);
	mutex_unlock(&evdev->mutex);

	evdev_detach_client(evdev, client);

	if (is_vmalloc_addr(client))
		vfree(client);
	else
		kfree(client);

	evdev_close_device(evdev);

	return 0;
}

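/*
 * Size the per-client ring so it holds EVDEV_BUF_PACKETS packets of the
 * device's hinted packet size, but never fewer than EVDEV_MIN_BUFFER_SIZE
 * events, rounded up to a power of two so head/tail can wrap with a mask.
 */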
static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
	unsigned int n_events =
		max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS,
		    EVDEV_MIN_BUFFER_SIZE);

	return roundup_pow_of_two(n_events);
}

static int evdev_open(struct inode *inode, struct file *file)
{
	struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
	unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
	unsigned int size = sizeof(struct evdev_client) +
					bufsize * sizeof(struct input_event);
	struct evdev_client *client;
	int error;

	client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!client)
		client = vzalloc(size);
	if (!client)
		return -ENOMEM;

	client->bufsize = bufsize;
	spin_lock_init(&client->buffer_lock);
	client->evdev = evdev;
	evdev_attach_client(evdev, client);

	error = evdev_open_device(evdev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	nonseekable_open(inode, file);

	return 0;

 err_free_client:
	evdev_detach_client(evdev, client);
	/* the client may have come from vzalloc() above, so don't plain kfree() it */
	if (is_vmalloc_addr(client))
		vfree(client);
	else
		kfree(client);
	return error;
}

static ssize_t evdev_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	int retval = 0;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	while (retval + input_event_size() <= count) {

		if (input_event_from_user(buffer + retval, &event)) {
			retval = -EFAULT;
			goto out;
		}
		retval += input_event_size();

		input_inject_event(&evdev->handle,
				   event.type, event.code, event.value);
	}

 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}

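/*
 * Pop one event off the client's queue.  Only events up to packet_head are
 * handed out, so a reader never sees a packet that has not yet been closed
 * by an EV_SYN/SYN_REPORT.
 */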
static int evdev_fetch_next_event(struct evdev_client *client,
				  struct input_event *event)
{
	int have_event;

	spin_lock_irq(&client->buffer_lock);

	have_event = client->packet_head != client->tail;
	if (have_event) {
		*event = client->buffer[client->tail++];
		client->tail &= client->bufsize - 1;
	}

	spin_unlock_irq(&client->buffer_lock);

	return have_event;
}

static ssize_t evdev_read(struct file *file, char __user *buffer,
			  size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	size_t read = 0;
	int error;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	for (;;) {
		if (!evdev->exist || client->revoked)
			return -ENODEV;

		if (client->packet_head == client->tail &&
		    (file->f_flags & O_NONBLOCK))
			return -EAGAIN;

		/*
		 * count == 0 is special - no IO is done but we check
		 * for error conditions (see above).
		 */
		if (count == 0)
			break;

		while (read + input_event_size() <= count &&
		       evdev_fetch_next_event(client, &event)) {

			if (input_event_to_user(buffer + read, &event))
				return -EFAULT;

			read += input_event_size();
		}

		if (read)
			break;

		if (!(file->f_flags & O_NONBLOCK)) {
			error = wait_event_interruptible(evdev->wait,
					client->packet_head != client->tail ||
					!evdev->exist || client->revoked);
			if (error)
				return error;
		}
	}

	return read;
}

/* No kernel lock - fine */
static unsigned int evdev_poll(struct file *file, poll_table *wait)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	unsigned int mask;

	poll_wait(file, &evdev->wait, wait);

	if (evdev->exist && !client->revoked)
		mask = POLLOUT | POLLWRNORM;
	else
		mask = POLLHUP | POLLERR;

	if (client->packet_head != client->tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

#ifdef CONFIG_COMPAT

#define BITS_PER_LONG_COMPAT	(sizeof(compat_long_t) * 8)
#define BITS_TO_LONGS_COMPAT(x)	((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)

#ifdef __BIG_ENDIAN
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;

	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}

	return len;
}
#else
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = compat ?
			BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t) :
			BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}
#endif /* __BIG_ENDIAN */

#else

static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}

#endif /* CONFIG_COMPAT */

static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
{
	int len;

	if (!str)
		return -ENOENT;

	len = strlen(str) + 1;
	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, str, len) ? -EFAULT : len;
}

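/*
 * EVIOCGBIT(type, len): copy a capability bitmap to userspace.  type == 0
 * returns dev->evbit (the set of supported event types); any other type
 * returns the bitmap of codes supported for that event type.
 */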
static int handle_eviocgbit(struct input_dev *dev,
			    unsigned int type, unsigned int size,
			    void __user *p, int compat_mode)
{
	unsigned long *bits;
	int len;

	switch (type) {

	case      0: bits = dev->evbit;  len = EV_MAX;  break;
	case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
	case EV_REL: bits = dev->relbit; len = REL_MAX; break;
	case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
	case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
	case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
	case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
	case EV_FF:  bits = dev->ffbit;  len = FF_MAX;  break;
	case EV_SW:  bits = dev->swbit;  len = SW_MAX;  break;
	default: return -EINVAL;
	}

	return bits_to_user(bits, len, size, p, compat_mode);
}

static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;
	int error;

	/* legacy case */
	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	if (put_user(ke.keycode, ip + 1))
		return -EFAULT;

	return 0;
}

static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke;
	int error;

	if (copy_from_user(&ke, p, sizeof(ke)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	if (copy_to_user(p, &ke, sizeof(ke)))
		return -EFAULT;

	return 0;
}

static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;

	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	if (get_user(ke.keycode, ip + 1))
		return -EFAULT;

	return input_set_keycode(dev, &ke);
}

static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke;

	if (copy_from_user(&ke, p, sizeof(ke)))
		return -EFAULT;

	if (ke.len > sizeof(ke.scancode))
		return -EINVAL;

	return input_set_keycode(dev, &ke);
}

/*
 * If we transfer state to the user, we should flush all pending events
 * of the same type from the client's queue.  Otherwise, they might end up
 * with duplicate events, which can screw up the client's state tracking.
 * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
 * event so user-space will notice missing events.
 *
 * LOCKING:
 * We need to take event_lock before buffer_lock to avoid dead-locks.  But we
 * need the event_lock only to guarantee consistent state.  We can safely
 * release it while flushing the queue.  This allows input-core to handle
 * filters while we flush the queue.
 */
static int evdev_handle_get_val(struct evdev_client *client,
				struct input_dev *dev, unsigned int type,
				unsigned long *bits, unsigned int max,
				unsigned int size, void __user *p, int compat)
{
	int ret;
	unsigned long *mem;

	mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	spin_lock_irq(&dev->event_lock);
	spin_lock(&client->buffer_lock);

	memcpy(mem, bits, sizeof(unsigned long) * max);

	spin_unlock(&dev->event_lock);

	__evdev_flush_queue(client, type);

	spin_unlock_irq(&client->buffer_lock);

	ret = bits_to_user(mem, max, size, p, compat);
	if (ret < 0)
		evdev_queue_syn_dropped(client);

	kfree(mem);

	return ret;
}

static int evdev_handle_mt_request(struct input_dev *dev,
				   unsigned int size,
				   int __user *ip)
{
	const struct input_mt *mt = dev->mt;
	unsigned int code;
	int max_slots;
	int i;

	if (get_user(code, &ip[0]))
		return -EFAULT;
	if (!mt || !input_is_mt_value(code))
		return -EINVAL;

	max_slots = (size - sizeof(__u32)) / sizeof(__s32);
	for (i = 0; i < mt->num_slots && i < max_slots; i++) {
		int value = input_mt_get_value(&mt->slots[i], code);
		if (put_user(value, &ip[1 + i]))
			return -EFAULT;
	}

	return 0;
}

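/*
 * EVIOCREVOKE: permanently revoke this client's access.  The file descriptor
 * stays open, but further reads, writes and ioctls fail with -ENODEV and
 * poll() reports POLLHUP/POLLERR, while other clients of the device are
 * unaffected.
 */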
static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
			struct file *file)
{
	client->revoked = true;
	evdev_ungrab(evdev, client);
	input_flush_device(&evdev->handle, file);
	wake_up_interruptible(&evdev->wait);

	return 0;
}

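/*
 * Main ioctl dispatcher.  Fixed-size commands are matched on the full
 * command number; variable-size commands (EVIOCGNAME, EVIOCGBIT, EVIOCGABS,
 * ...) are matched with the size bits masked off, so userspace may pass a
 * buffer of any length and gets back at most that many bytes.  Illustrative
 * user-space call (not part of this file):
 *
 *	char name[128];
 *	ioctl(fd, EVIOCGNAME(sizeof(name)), name);
 */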
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
			   void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_dev *dev = evdev->handle.dev;
	struct input_absinfo abs;
	struct ff_effect effect;
	int __user *ip = (int __user *)p;
	unsigned int i, t, u, v;
	unsigned int size;
	int error;

	/* First we check for fixed-length commands */
	switch (cmd) {

	case EVIOCGVERSION:
		return put_user(EV_VERSION, ip);

	case EVIOCGID:
		if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
			return -EFAULT;
		return 0;

	case EVIOCGREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (put_user(dev->rep[REP_DELAY], ip))
			return -EFAULT;
		if (put_user(dev->rep[REP_PERIOD], ip + 1))
			return -EFAULT;
		return 0;

	case EVIOCSREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (get_user(u, ip))
			return -EFAULT;
		if (get_user(v, ip + 1))
			return -EFAULT;

		input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
		input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);

		return 0;

	case EVIOCRMFF:
		return input_ff_erase(dev, (int)(unsigned long) p, file);

	case EVIOCGEFFECTS:
		i = test_bit(EV_FF, dev->evbit) ?
				dev->ff->max_effects : 0;
		if (put_user(i, ip))
			return -EFAULT;
		return 0;

	case EVIOCGRAB:
		if (p)
			return evdev_grab(evdev, client);
		else
			return evdev_ungrab(evdev, client);

	case EVIOCREVOKE:
		if (p)
			return -EINVAL;
		else
			return evdev_revoke(evdev, client, file);

	case EVIOCSCLOCKID:
		if (copy_from_user(&i, p, sizeof(unsigned int)))
			return -EFAULT;
		if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
			return -EINVAL;
		client->clkid = i;
		return 0;

	case EVIOCGKEYCODE:
		return evdev_handle_get_keycode(dev, p);

	case EVIOCSKEYCODE:
		return evdev_handle_set_keycode(dev, p);

	case EVIOCGKEYCODE_V2:
		return evdev_handle_get_keycode_v2(dev, p);

	case EVIOCSKEYCODE_V2:
		return evdev_handle_set_keycode_v2(dev, p);
	}

	size = _IOC_SIZE(cmd);

	/* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr)	((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
	switch (EVIOC_MASK_SIZE(cmd)) {

	case EVIOCGPROP(0):
		return bits_to_user(dev->propbit, INPUT_PROP_MAX,
				    size, p, compat_mode);

	case EVIOCGMTSLOTS(0):
		return evdev_handle_mt_request(dev, size, ip);

	case EVIOCGKEY(0):
		return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
					    KEY_MAX, size, p, compat_mode);

	case EVIOCGLED(0):
		return evdev_handle_get_val(client, dev, EV_LED, dev->led,
					    LED_MAX, size, p, compat_mode);

	case EVIOCGSND(0):
		return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
					    SND_MAX, size, p, compat_mode);

	case EVIOCGSW(0):
		return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
					    SW_MAX, size, p, compat_mode);

	case EVIOCGNAME(0):
		return str_to_user(dev->name, size, p);

	case EVIOCGPHYS(0):
		return str_to_user(dev->phys, size, p);

	case EVIOCGUNIQ(0):
		return str_to_user(dev->uniq, size, p);

	case EVIOC_MASK_SIZE(EVIOCSFF):
		if (input_ff_effect_from_user(p, size, &effect))
			return -EFAULT;

		error = input_ff_upload(dev, &effect, file);
		if (error)
			return error;

		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
			return -EFAULT;

		return 0;
	}

	/* Multi-number variable-length handlers */
	if (_IOC_TYPE(cmd) != 'E')
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ) {

		if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
			return handle_eviocgbit(dev,
						_IOC_NR(cmd) & EV_MAX, size,
						p, compat_mode);

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;
			abs = dev->absinfo[t];

			if (copy_to_user(p, &abs, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			return 0;
		}
	}

	if (_IOC_DIR(cmd) == _IOC_WRITE) {

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;

			if (copy_from_user(&abs, p, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			if (size < sizeof(struct input_absinfo))
				abs.resolution = 0;

			/* We can't change number of reserved MT slots */
			if (t == ABS_MT_SLOT)
				return -EINVAL;

			/*
			 * Take event lock to ensure that we are not
			 * changing device parameters in the middle
			 * of an event.
			 */
			spin_lock_irq(&dev->event_lock);
			dev->absinfo[t] = abs;
			spin_unlock_irq(&dev->event_lock);

			return 0;
		}
	}

	return -EINVAL;
}

static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
				void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	retval = evdev_do_ioctl(file, cmd, p, compat_mode);

 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}

static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
}

#ifdef CONFIG_COMPAT
static long evdev_ioctl_compat(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
}
#endif

static const struct file_operations evdev_fops = {
	.owner		= THIS_MODULE,
	.read		= evdev_read,
	.write		= evdev_write,
	.poll		= evdev_poll,
	.open		= evdev_open,
	.release	= evdev_release,
	.unlocked_ioctl	= evdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= evdev_ioctl_compat,
#endif
	.fasync		= evdev_fasync,
	.flush		= evdev_flush,
	.llseek		= no_llseek,
};

/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Already posted
 * blocking reads will stay, however new ones will fail.
 */
static void evdev_mark_dead(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	evdev->exist = false;
	mutex_unlock(&evdev->mutex);
}

static void evdev_cleanup(struct evdev *evdev)
{
	struct input_handle *handle = &evdev->handle;

	evdev_mark_dead(evdev);
	evdev_hangup(evdev);

	cdev_del(&evdev->cdev);

	/* evdev is marked dead so no one else accesses evdev->open */
	if (evdev->open) {
		input_flush_device(handle, NULL);
		input_close_device(handle);
	}
}

/*
 * Create new evdev device. Note that input core serializes calls
 * to connect and disconnect.
 */
static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
			 const struct input_device_id *id)
{
	struct evdev *evdev;
	int minor;
	int dev_no;
	int error;

	minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
	if (minor < 0) {
		error = minor;
		pr_err("failed to reserve new minor: %d\n", error);
		return error;
	}

	evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
	if (!evdev) {
		error = -ENOMEM;
		goto err_free_minor;
	}

	INIT_LIST_HEAD(&evdev->client_list);
	spin_lock_init(&evdev->client_lock);
	mutex_init(&evdev->mutex);
	init_waitqueue_head(&evdev->wait);
	evdev->exist = true;

	dev_no = minor;
	/* Normalize device number if it falls into legacy range */
	if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
		dev_no -= EVDEV_MINOR_BASE;
	dev_set_name(&evdev->dev, "event%d", dev_no);

	evdev->handle.dev = input_get_device(dev);
	evdev->handle.name = dev_name(&evdev->dev);
	evdev->handle.handler = handler;
	evdev->handle.private = evdev;

	evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
	evdev->dev.class = &input_class;
	evdev->dev.parent = &dev->dev;
	evdev->dev.release = evdev_free;
	device_initialize(&evdev->dev);

	error = input_register_handle(&evdev->handle);
	if (error)
		goto err_free_evdev;

	cdev_init(&evdev->cdev, &evdev_fops);
	evdev->cdev.kobj.parent = &evdev->dev.kobj;
	error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
	if (error)
		goto err_unregister_handle;

	error = device_add(&evdev->dev);
	if (error)
		goto err_cleanup_evdev;

	return 0;

 err_cleanup_evdev:
	evdev_cleanup(evdev);
 err_unregister_handle:
	input_unregister_handle(&evdev->handle);
 err_free_evdev:
	put_device(&evdev->dev);
 err_free_minor:
	input_free_minor(minor);
	return error;
}

static void evdev_disconnect(struct input_handle *handle)
{
	struct evdev *evdev = handle->private;

	device_del(&evdev->dev);
	evdev_cleanup(evdev);
	input_free_minor(MINOR(evdev->dev.devt));
	input_unregister_handle(handle);
	put_device(&evdev->dev);
}

static const struct input_device_id evdev_ids[] = {
	{ .driver_info = 1 },	/* Matches all devices */
	{ },			/* Terminating zero entry */
};

MODULE_DEVICE_TABLE(input, evdev_ids);

static struct input_handler evdev_handler = {
	.event		= evdev_event,
	.events		= evdev_events,
	.connect	= evdev_connect,
	.disconnect	= evdev_disconnect,
	.legacy_minors	= true,
	.minor		= EVDEV_MINOR_BASE,
	.name		= "evdev",
	.id_table	= evdev_ids,
};

static int __init evdev_init(void)
{
	return input_register_handler(&evdev_handler);
}

static void __exit evdev_exit(void)
{
	input_unregister_handler(&evdev_handler);
}

module_init(evdev_init);
module_exit(evdev_exit);

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input driver event char devices");
MODULE_LICENSE("GPL");