drivers/input/serio/serio.c
1 /*
2 * The Serio abstraction module
3 *
4 * Copyright (c) 1999-2004 Vojtech Pavlik
5 * Copyright (c) 2004 Dmitry Torokhov
6 * Copyright (c) 2003 Daniele Bellucci
7 */
8
9 /*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */
28
29 #include <linux/stddef.h>
30 #include <linux/module.h>
31 #include <linux/serio.h>
32 #include <linux/errno.h>
33 #include <linux/wait.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/kthread.h>
37 #include <linux/mutex.h>
38 #include <linux/freezer.h>
39
40 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
41 MODULE_DESCRIPTION("Serio abstraction core");
42 MODULE_LICENSE("GPL");
43
44 /*
45 * serio_mutex protects the entire serio subsystem and is taken every time
46 * a serio port or driver is registered or unregistered.
47 */
48 static DEFINE_MUTEX(serio_mutex);
49
50 static LIST_HEAD(serio_list);
51
52 static struct bus_type serio_bus;
53
54 static void serio_add_port(struct serio *serio);
55 static int serio_reconnect_port(struct serio *serio);
56 static void serio_disconnect_port(struct serio *serio);
57 static void serio_reconnect_chain(struct serio *serio);
58 static void serio_attach_driver(struct serio_driver *drv);
59
60 static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
61 {
62 int retval;
63
64 mutex_lock(&serio->drv_mutex);
65 retval = drv->connect(serio, drv);
66 mutex_unlock(&serio->drv_mutex);
67
68 return retval;
69 }
70
71 static int serio_reconnect_driver(struct serio *serio)
72 {
73 int retval = -1;
74
75 mutex_lock(&serio->drv_mutex);
76 if (serio->drv && serio->drv->reconnect)
77 retval = serio->drv->reconnect(serio);
78 mutex_unlock(&serio->drv_mutex);
79
80 return retval;
81 }
82
83 static void serio_disconnect_driver(struct serio *serio)
84 {
85 mutex_lock(&serio->drv_mutex);
86 if (serio->drv)
87 serio->drv->disconnect(serio);
88 mutex_unlock(&serio->drv_mutex);
89 }
90
91 static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
92 {
93 while (ids->type || ids->proto) {
94 if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
95 (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
96 (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
97 (ids->id == SERIO_ANY || ids->id == serio->id.id))
98 return 1;
99 ids++;
100 }
101 return 0;
102 }
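/*
 * Illustrative sketch (not part of the original file): a driver's id_table
 * is an array of serio_device_id entries terminated by an all-zero entry,
 * and SERIO_ANY acts as a wildcard for any of the four fields.  A
 * hypothetical driver that binds to every port of type SERIO_8042,
 * whatever its protocol, could declare:
 *
 *	static struct serio_device_id example_serio_ids[] = {
 *		{
 *			.type	= SERIO_8042,
 *			.proto	= SERIO_ANY,
 *			.id	= SERIO_ANY,
 *			.extra	= SERIO_ANY,
 *		},
 *		{ 0 }
 *	};
 */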
103
104 /*
105 * Basic serio -> driver core mappings
106 */
107
108 static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
109 {
110 int error;
111
112 if (serio_match_port(drv->id_table, serio)) {
113
114 serio->dev.driver = &drv->driver;
115 if (serio_connect_driver(serio, drv)) {
116 serio->dev.driver = NULL;
117 return -ENODEV;
118 }
119
120 error = device_bind_driver(&serio->dev);
121 if (error) {
122 printk(KERN_WARNING
123 "serio: device_bind_driver() failed "
124 "for %s (%s) and %s, error: %d\n",
125 serio->phys, serio->name,
126 drv->description, error);
127 serio_disconnect_driver(serio);
128 serio->dev.driver = NULL;
129 return error;
130 }
131 }
132 return 0;
133 }
134
135 static void serio_find_driver(struct serio *serio)
136 {
137 int error;
138
139 error = device_attach(&serio->dev);
140 if (error < 0)
141 printk(KERN_WARNING
142 "serio: device_attach() failed for %s (%s), error: %d\n",
143 serio->phys, serio->name, error);
144 }
145
146
147 /*
148 * Serio event processing.
149 */
150
151 enum serio_event_type {
152 SERIO_RESCAN_PORT,
153 SERIO_RECONNECT_PORT,
154 SERIO_RECONNECT_CHAIN,
155 SERIO_REGISTER_PORT,
156 SERIO_ATTACH_DRIVER,
157 };
158
159 struct serio_event {
160 enum serio_event_type type;
161 void *object;
162 struct module *owner;
163 struct list_head node;
164 };
165
166 static DEFINE_SPINLOCK(serio_event_lock); /* protects serio_event_list */
167 static LIST_HEAD(serio_event_list);
168 static DECLARE_WAIT_QUEUE_HEAD(serio_wait);
169 static struct task_struct *serio_task;
170
171 static int serio_queue_event(void *object, struct module *owner,
172 enum serio_event_type event_type)
173 {
174 unsigned long flags;
175 struct serio_event *event;
176 int retval = 0;
177
178 spin_lock_irqsave(&serio_event_lock, flags);
179
180 /*
181 * Scan the event list for other events queued for the same serio port,
182 * starting with the most recent one. If an identical event is already
183 * queued we do not need to add a new one. If the most recent event is
184 * of a different type we must add the new event and should not look
185 * further, because we need to preserve the sequence of distinct events.
186 */
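/*
 * For example, queueing RESCAN, RESCAN for the same port collapses into a
 * single RESCAN event, whereas RESCAN, RECONNECT, RESCAN keeps all three
 * events so that the order of distinct requests is preserved.
 */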
187 list_for_each_entry_reverse(event, &serio_event_list, node) {
188 if (event->object == object) {
189 if (event->type == event_type)
190 goto out;
191 break;
192 }
193 }
194
195 event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
196 if (!event) {
197 printk(KERN_ERR
198 "serio: Not enough memory to queue event %d\n",
199 event_type);
200 retval = -ENOMEM;
201 goto out;
202 }
203
204 if (!try_module_get(owner)) {
205 printk(KERN_WARNING
206 "serio: Can't get module reference, dropping event %d\n",
207 event_type);
208 kfree(event);
209 retval = -EINVAL;
210 goto out;
211 }
212
213 event->type = event_type;
214 event->object = object;
215 event->owner = owner;
216
217 list_add_tail(&event->node, &serio_event_list);
218 wake_up(&serio_wait);
219
220 out:
221 spin_unlock_irqrestore(&serio_event_lock, flags);
222 return retval;
223 }
224
225 static void serio_free_event(struct serio_event *event)
226 {
227 module_put(event->owner);
228 kfree(event);
229 }
230
231 static void serio_remove_duplicate_events(struct serio_event *event)
232 {
233 struct list_head *node, *next;
234 struct serio_event *e;
235 unsigned long flags;
236
237 spin_lock_irqsave(&serio_event_lock, flags);
238
239 list_for_each_safe(node, next, &serio_event_list) {
240 e = list_entry(node, struct serio_event, node);
241 if (event->object == e->object) {
242 /*
243 * If this event is of a different type we should not
244 * look further - we only suppress duplicate events
245 * that were sent back-to-back.
246 */
247 if (event->type != e->type)
248 break;
249
250 list_del_init(node);
251 serio_free_event(e);
252 }
253 }
254
255 spin_unlock_irqrestore(&serio_event_lock, flags);
256 }
257
258
259 static struct serio_event *serio_get_event(void)
260 {
261 struct serio_event *event;
262 struct list_head *node;
263 unsigned long flags;
264
265 spin_lock_irqsave(&serio_event_lock, flags);
266
267 if (list_empty(&serio_event_list)) {
268 spin_unlock_irqrestore(&serio_event_lock, flags);
269 return NULL;
270 }
271
272 node = serio_event_list.next;
273 event = list_entry(node, struct serio_event, node);
274 list_del_init(node);
275
276 spin_unlock_irqrestore(&serio_event_lock, flags);
277
278 return event;
279 }
280
281 static void serio_handle_event(void)
282 {
283 struct serio_event *event;
284
285 mutex_lock(&serio_mutex);
286
287 while ((event = serio_get_event())) {
288
289 switch (event->type) {
290 case SERIO_REGISTER_PORT:
291 serio_add_port(event->object);
292 break;
293
294 case SERIO_RECONNECT_PORT:
295 serio_reconnect_port(event->object);
296 break;
297
298 case SERIO_RESCAN_PORT:
299 serio_disconnect_port(event->object);
300 serio_find_driver(event->object);
301 break;
302
303 case SERIO_RECONNECT_CHAIN:
304 serio_reconnect_chain(event->object);
305 break;
306
307 case SERIO_ATTACH_DRIVER:
308 serio_attach_driver(event->object);
309 break;
310
311 default:
312 break;
313 }
314
315 serio_remove_duplicate_events(event);
316 serio_free_event(event);
317 }
318
319 mutex_unlock(&serio_mutex);
320 }
321
322 /*
323 * Remove all events that have been submitted for a given
324 * object, be it serio port or driver.
325 */
326 static void serio_remove_pending_events(void *object)
327 {
328 struct list_head *node, *next;
329 struct serio_event *event;
330 unsigned long flags;
331
332 spin_lock_irqsave(&serio_event_lock, flags);
333
334 list_for_each_safe(node, next, &serio_event_list) {
335 event = list_entry(node, struct serio_event, node);
336 if (event->object == object) {
337 list_del_init(node);
338 serio_free_event(event);
339 }
340 }
341
342 spin_unlock_irqrestore(&serio_event_lock, flags);
343 }
344
345 /*
346 * Destroy child serio port (if any) that has not been fully registered yet.
347 *
348 * Note that we rely on the fact that port can have only one child and therefore
349 * only one child registration request can be pending. Additionally, children
350 * are registered by driver's connect() handler so there can't be a grandchild
351 * pending registration together with a child.
352 */
353 static struct serio *serio_get_pending_child(struct serio *parent)
354 {
355 struct serio_event *event;
356 struct serio *serio, *child = NULL;
357 unsigned long flags;
358
359 spin_lock_irqsave(&serio_event_lock, flags);
360
361 list_for_each_entry(event, &serio_event_list, node) {
362 if (event->type == SERIO_REGISTER_PORT) {
363 serio = event->object;
364 if (serio->parent == parent) {
365 child = serio;
366 break;
367 }
368 }
369 }
370
371 spin_unlock_irqrestore(&serio_event_lock, flags);
372 return child;
373 }
374
375 static int serio_thread(void *nothing)
376 {
377 do {
378 serio_handle_event();
379 wait_event_interruptible(serio_wait,
380 kthread_should_stop() || !list_empty(&serio_event_list));
381 } while (!kthread_should_stop());
382
383 printk(KERN_DEBUG "serio: kseriod exiting\n");
384 return 0;
385 }
386
387
388 /*
389 * Serio port operations
390 */
391
392 static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
393 {
394 struct serio *serio = to_serio_port(dev);
395 return sprintf(buf, "%s\n", serio->name);
396 }
397
398 static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
399 {
400 struct serio *serio = to_serio_port(dev);
401
402 return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
403 serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
404 }
405
406 static ssize_t serio_show_id_type(struct device *dev, struct device_attribute *attr, char *buf)
407 {
408 struct serio *serio = to_serio_port(dev);
409 return sprintf(buf, "%02x\n", serio->id.type);
410 }
411
412 static ssize_t serio_show_id_proto(struct device *dev, struct device_attribute *attr, char *buf)
413 {
414 struct serio *serio = to_serio_port(dev);
415 return sprintf(buf, "%02x\n", serio->id.proto);
416 }
417
418 static ssize_t serio_show_id_id(struct device *dev, struct device_attribute *attr, char *buf)
419 {
420 struct serio *serio = to_serio_port(dev);
421 return sprintf(buf, "%02x\n", serio->id.id);
422 }
423
424 static ssize_t serio_show_id_extra(struct device *dev, struct device_attribute *attr, char *buf)
425 {
426 struct serio *serio = to_serio_port(dev);
427 return sprintf(buf, "%02x\n", serio->id.extra);
428 }
429
430 static DEVICE_ATTR(type, S_IRUGO, serio_show_id_type, NULL);
431 static DEVICE_ATTR(proto, S_IRUGO, serio_show_id_proto, NULL);
432 static DEVICE_ATTR(id, S_IRUGO, serio_show_id_id, NULL);
433 static DEVICE_ATTR(extra, S_IRUGO, serio_show_id_extra, NULL);
434
435 static struct attribute *serio_device_id_attrs[] = {
436 &dev_attr_type.attr,
437 &dev_attr_proto.attr,
438 &dev_attr_id.attr,
439 &dev_attr_extra.attr,
440 NULL
441 };
442
443 static struct attribute_group serio_id_attr_group = {
444 .name = "id",
445 .attrs = serio_device_id_attrs,
446 };
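/*
 * The attributes above appear under the port's sysfs directory, e.g. (with
 * illustrative numbering) /sys/bus/serio/devices/serio0/id/type,
 * .../id/proto, .../id/id and .../id/extra, each printing the corresponding
 * byte of the port's serio_device_id.
 */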
447
448 static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
449 {
450 struct serio *serio = to_serio_port(dev);
451 struct device_driver *drv;
452 int error;
453
454 error = mutex_lock_interruptible(&serio_mutex);
455 if (error)
456 return error;
457
458 if (!strncmp(buf, "none", count)) {
459 serio_disconnect_port(serio);
460 } else if (!strncmp(buf, "reconnect", count)) {
461 serio_reconnect_chain(serio);
462 } else if (!strncmp(buf, "rescan", count)) {
463 serio_disconnect_port(serio);
464 serio_find_driver(serio);
465 } else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
466 serio_disconnect_port(serio);
467 error = serio_bind_driver(serio, to_serio_driver(drv));
468 put_driver(drv);
469 } else {
470 error = -EINVAL;
471 }
472
473 mutex_unlock(&serio_mutex);
474
475 return error ? error : count;
476 }
477
478 static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
479 {
480 struct serio *serio = to_serio_port(dev);
481 return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
482 }
483
484 static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
485 {
486 struct serio *serio = to_serio_port(dev);
487 int retval;
488
489 retval = count;
490 if (!strncmp(buf, "manual", count)) {
491 serio->manual_bind = true;
492 } else if (!strncmp(buf, "auto", count)) {
493 serio->manual_bind = false;
494 } else {
495 retval = -EINVAL;
496 }
497
498 return retval;
499 }
500
501 static struct device_attribute serio_device_attrs[] = {
502 __ATTR(description, S_IRUGO, serio_show_description, NULL),
503 __ATTR(modalias, S_IRUGO, serio_show_modalias, NULL),
504 __ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver),
505 __ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode),
506 __ATTR_NULL
507 };
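/*
 * Illustrative userspace usage (paths and port numbers depend on the
 * system): writing "none", "reconnect", "rescan" or a driver name to drvctl
 * triggers the corresponding action implemented above, and bind_mode
 * switches a port between automatic and manual binding.  Note the -n, since
 * the handlers compare the raw buffer including any trailing newline:
 *
 *	echo -n rescan > /sys/bus/serio/devices/serio0/drvctl
 *	echo -n manual > /sys/bus/serio/devices/serio0/bind_mode
 */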
508
509
510 static void serio_release_port(struct device *dev)
511 {
512 struct serio *serio = to_serio_port(dev);
513
514 kfree(serio);
515 module_put(THIS_MODULE);
516 }
517
518 /*
519 * Prepare serio port for registration.
520 */
521 static void serio_init_port(struct serio *serio)
522 {
523 static atomic_t serio_no = ATOMIC_INIT(0);
524
525 __module_get(THIS_MODULE);
526
527 INIT_LIST_HEAD(&serio->node);
528 spin_lock_init(&serio->lock);
529 mutex_init(&serio->drv_mutex);
530 device_initialize(&serio->dev);
531 dev_set_name(&serio->dev, "serio%ld",
532 (long)atomic_inc_return(&serio_no) - 1);
533 serio->dev.bus = &serio_bus;
534 serio->dev.release = serio_release_port;
535 if (serio->parent) {
536 serio->dev.parent = &serio->parent->dev;
537 serio->depth = serio->parent->depth + 1;
538 } else
539 serio->depth = 0;
540 lockdep_set_subclass(&serio->lock, serio->depth);
541 }
542
543 /*
544 * Complete serio port registration.
545 * Driver core will attempt to find appropriate driver for the port.
546 */
547 static void serio_add_port(struct serio *serio)
548 {
549 int error;
550
551 if (serio->parent) {
552 serio_pause_rx(serio->parent);
553 serio->parent->child = serio;
554 serio_continue_rx(serio->parent);
555 }
556
557 list_add_tail(&serio->node, &serio_list);
558 if (serio->start)
559 serio->start(serio);
560 error = device_add(&serio->dev);
561 if (error)
562 printk(KERN_ERR
563 "serio: device_add() failed for %s (%s), error: %d\n",
564 serio->phys, serio->name, error);
565 else {
566 serio->registered = true;
567 error = sysfs_create_group(&serio->dev.kobj, &serio_id_attr_group);
568 if (error)
569 printk(KERN_ERR
570 "serio: sysfs_create_group() failed for %s (%s), error: %d\n",
571 serio->phys, serio->name, error);
572 }
573 }
574
575 /*
576 * serio_destroy_port() completes deregistration process and removes
577 * port from the system
578 */
579 static void serio_destroy_port(struct serio *serio)
580 {
581 struct serio *child;
582
583 child = serio_get_pending_child(serio);
584 if (child) {
585 serio_remove_pending_events(child);
586 put_device(&child->dev);
587 }
588
589 if (serio->stop)
590 serio->stop(serio);
591
592 if (serio->parent) {
593 serio_pause_rx(serio->parent);
594 serio->parent->child = NULL;
595 serio_continue_rx(serio->parent);
596 serio->parent = NULL;
597 }
598
599 if (serio->registered) {
600 sysfs_remove_group(&serio->dev.kobj, &serio_id_attr_group);
601 device_del(&serio->dev);
602 serio->registered = false;
603 }
604
605 list_del_init(&serio->node);
606 serio_remove_pending_events(serio);
607 put_device(&serio->dev);
608 }
609
610 /*
611 * Reconnect serio port (re-initialize attached device).
612 * If reconnect fails (old device is no longer attached or
613 * there was no device to begin with) we do a full rescan in the
614 * hope of finding a driver for the port.
615 */
616 static int serio_reconnect_port(struct serio *serio)
617 {
618 int error = serio_reconnect_driver(serio);
619
620 if (error) {
621 serio_disconnect_port(serio);
622 serio_find_driver(serio);
623 }
624
625 return error;
626 }
627
628 /*
629 * Reconnect serio port and all its children (re-initialize attached devices)
630 */
631 static void serio_reconnect_chain(struct serio *serio)
632 {
633 do {
634 if (serio_reconnect_port(serio)) {
635 /* Ok, old children are now gone, we are done */
636 break;
637 }
638 serio = serio->child;
639 } while (serio);
640 }
641
642 /*
643 * serio_disconnect_port() unbinds a port from its driver. As a side effect
644 * all child ports are unbound and destroyed.
645 */
646 static void serio_disconnect_port(struct serio *serio)
647 {
648 struct serio *s, *parent;
649
650 if (serio->child) {
651 /*
652 * Child ports should be disconnected and destroyed
653 * first, starting with the leaf one, since we don't want
654 * to recurse
655 */
656 for (s = serio; s->child; s = s->child)
657 /* empty */;
658
659 do {
660 parent = s->parent;
661
662 device_release_driver(&s->dev);
663 serio_destroy_port(s);
664 } while ((s = parent) != serio);
665 }
666
667 /*
668 * Ok, no children left, now disconnect this port
669 */
670 device_release_driver(&serio->dev);
671 }
672
673 void serio_rescan(struct serio *serio)
674 {
675 serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
676 }
677 EXPORT_SYMBOL(serio_rescan);
678
679 void serio_reconnect(struct serio *serio)
680 {
681 serio_queue_event(serio, NULL, SERIO_RECONNECT_CHAIN);
682 }
683 EXPORT_SYMBOL(serio_reconnect);
684
685 /*
686 * Submits a register request to kseriod for subsequent execution.
687 * Note that port registration is always asynchronous.
688 */
689 void __serio_register_port(struct serio *serio, struct module *owner)
690 {
691 serio_init_port(serio);
692 serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
693 }
694 EXPORT_SYMBOL(__serio_register_port);
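/*
 * Minimal sketch (hypothetical helper names, not part of this file) of how a
 * port driver typically creates and registers a port; serio_register_port()
 * is the serio.h wrapper that passes THIS_MODULE to __serio_register_port():
 *
 *	static struct serio *example_allocate_port(void)
 *	{
 *		struct serio *serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 *
 *		if (!serio)
 *			return NULL;
 *
 *		serio->id.type = SERIO_RS232;
 *		serio->write = example_write;
 *		strlcpy(serio->name, "Example port", sizeof(serio->name));
 *		strlcpy(serio->phys, "example/serio0", sizeof(serio->phys));
 *
 *		serio_register_port(serio);
 *		return serio;
 *	}
 */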
695
696 /*
697 * Synchronously unregisters a serio port.
698 */
699 void serio_unregister_port(struct serio *serio)
700 {
701 mutex_lock(&serio_mutex);
702 serio_disconnect_port(serio);
703 serio_destroy_port(serio);
704 mutex_unlock(&serio_mutex);
705 }
706 EXPORT_SYMBOL(serio_unregister_port);
707
708 /*
709 * Safely unregisters child port if one is present.
710 */
711 void serio_unregister_child_port(struct serio *serio)
712 {
713 mutex_lock(&serio_mutex);
714 if (serio->child) {
715 serio_disconnect_port(serio->child);
716 serio_destroy_port(serio->child);
717 }
718 mutex_unlock(&serio_mutex);
719 }
720 EXPORT_SYMBOL(serio_unregister_child_port);
721
722
723 /*
724 * Serio driver operations
725 */
726
727 static ssize_t serio_driver_show_description(struct device_driver *drv, char *buf)
728 {
729 struct serio_driver *driver = to_serio_driver(drv);
730 return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
731 }
732
733 static ssize_t serio_driver_show_bind_mode(struct device_driver *drv, char *buf)
734 {
735 struct serio_driver *serio_drv = to_serio_driver(drv);
736 return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
737 }
738
739 static ssize_t serio_driver_set_bind_mode(struct device_driver *drv, const char *buf, size_t count)
740 {
741 struct serio_driver *serio_drv = to_serio_driver(drv);
742 int retval;
743
744 retval = count;
745 if (!strncmp(buf, "manual", count)) {
746 serio_drv->manual_bind = true;
747 } else if (!strncmp(buf, "auto", count)) {
748 serio_drv->manual_bind = false;
749 } else {
750 retval = -EINVAL;
751 }
752
753 return retval;
754 }
755
756
757 static struct driver_attribute serio_driver_attrs[] = {
758 __ATTR(description, S_IRUGO, serio_driver_show_description, NULL),
759 __ATTR(bind_mode, S_IWUSR | S_IRUGO,
760 serio_driver_show_bind_mode, serio_driver_set_bind_mode),
761 __ATTR_NULL
762 };
763
764 static int serio_driver_probe(struct device *dev)
765 {
766 struct serio *serio = to_serio_port(dev);
767 struct serio_driver *drv = to_serio_driver(dev->driver);
768
769 return serio_connect_driver(serio, drv);
770 }
771
772 static int serio_driver_remove(struct device *dev)
773 {
774 struct serio *serio = to_serio_port(dev);
775
776 serio_disconnect_driver(serio);
777 return 0;
778 }
779
780 static void serio_cleanup(struct serio *serio)
781 {
782 mutex_lock(&serio->drv_mutex);
783 if (serio->drv && serio->drv->cleanup)
784 serio->drv->cleanup(serio);
785 mutex_unlock(&serio->drv_mutex);
786 }
787
788 static void serio_shutdown(struct device *dev)
789 {
790 struct serio *serio = to_serio_port(dev);
791
792 serio_cleanup(serio);
793 }
794
795 static void serio_attach_driver(struct serio_driver *drv)
796 {
797 int error;
798
799 error = driver_attach(&drv->driver);
800 if (error)
801 printk(KERN_WARNING
802 "serio: driver_attach() failed for %s with error %d\n",
803 drv->driver.name, error);
804 }
805
806 int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
807 {
808 bool manual_bind = drv->manual_bind;
809 int error;
810
811 drv->driver.bus = &serio_bus;
812 drv->driver.owner = owner;
813 drv->driver.mod_name = mod_name;
814
815 /*
816 * Temporarily disable automatic binding because probing
817 * takes a long time and we are better off doing it in kseriod
818 */
819 drv->manual_bind = true;
820
821 error = driver_register(&drv->driver);
822 if (error) {
823 printk(KERN_ERR
824 "serio: driver_register() failed for %s, error: %d\n",
825 drv->driver.name, error);
826 return error;
827 }
828
829 /*
830 * Restore the original bind mode and let kseriod bind the
831 * driver to free ports
832 */
833 if (!manual_bind) {
834 drv->manual_bind = false;
835 error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
836 if (error) {
837 driver_unregister(&drv->driver);
838 return error;
839 }
840 }
841
842 return 0;
843 }
844 EXPORT_SYMBOL(__serio_register_driver);
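/*
 * Minimal sketch of the driver side (hypothetical names, reusing the
 * example_serio_ids table sketched near serio_match_port()): a serio driver
 * fills in a struct serio_driver and registers it, and the
 * serio_register_driver() macro from serio.h supplies the THIS_MODULE and
 * KBUILD_MODNAME arguments expected above:
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio device driver",
 *		.id_table	= example_serio_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return serio_register_driver(&example_drv);
 *	}
 */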
845
846 void serio_unregister_driver(struct serio_driver *drv)
847 {
848 struct serio *serio;
849
850 mutex_lock(&serio_mutex);
851
852 drv->manual_bind = true; /* so serio_find_driver ignores it */
853 serio_remove_pending_events(drv);
854
855 start_over:
856 list_for_each_entry(serio, &serio_list, node) {
857 if (serio->drv == drv) {
858 serio_disconnect_port(serio);
859 serio_find_driver(serio);
860 /* we could've deleted some ports, restart */
861 goto start_over;
862 }
863 }
864
865 driver_unregister(&drv->driver);
866 mutex_unlock(&serio_mutex);
867 }
868 EXPORT_SYMBOL(serio_unregister_driver);
869
870 static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
871 {
872 serio_pause_rx(serio);
873 serio->drv = drv;
874 serio_continue_rx(serio);
875 }
876
877 static int serio_bus_match(struct device *dev, struct device_driver *drv)
878 {
879 struct serio *serio = to_serio_port(dev);
880 struct serio_driver *serio_drv = to_serio_driver(drv);
881
882 if (serio->manual_bind || serio_drv->manual_bind)
883 return 0;
884
885 return serio_match_port(serio_drv->id_table, serio);
886 }
887
888 #ifdef CONFIG_HOTPLUG
889
890 #define SERIO_ADD_UEVENT_VAR(fmt, val...) \
891 do { \
892 int err = add_uevent_var(env, fmt, val); \
893 if (err) \
894 return err; \
895 } while (0)
896
897 static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
898 {
899 struct serio *serio;
900
901 if (!dev)
902 return -ENODEV;
903
904 serio = to_serio_port(dev);
905
906 SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
907 SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
908 SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
909 SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
910 SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
911 serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
912
913 return 0;
914 }
915 #undef SERIO_ADD_UEVENT_VAR
916
917 #else
918
919 static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
920 {
921 return -ENODEV;
922 }
923
924 #endif /* CONFIG_HOTPLUG */
925
926 #ifdef CONFIG_PM
927 static int serio_suspend(struct device *dev)
928 {
929 struct serio *serio = to_serio_port(dev);
930
931 serio_cleanup(serio);
932
933 return 0;
934 }
935
936 static int serio_resume(struct device *dev)
937 {
938 struct serio *serio = to_serio_port(dev);
939
940 /*
941 * Driver reconnect can take a while, so better let kseriod
942 * deal with it.
943 */
944 serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
945
946 return 0;
947 }
948
949 static const struct dev_pm_ops serio_pm_ops = {
950 .suspend = serio_suspend,
951 .resume = serio_resume,
952 .poweroff = serio_suspend,
953 .restore = serio_resume,
954 };
955 #endif /* CONFIG_PM */
956
957 /* called from serio_driver->connect/disconnect methods under serio_mutex */
958 int serio_open(struct serio *serio, struct serio_driver *drv)
959 {
960 serio_set_drv(serio, drv);
961
962 if (serio->open && serio->open(serio)) {
963 serio_set_drv(serio, NULL);
964 return -1;
965 }
966 return 0;
967 }
968 EXPORT_SYMBOL(serio_open);
969
970 /* called from serio_driver->connect/disconnect methods under serio_mutex */
971 void serio_close(struct serio *serio)
972 {
973 if (serio->close)
974 serio->close(serio);
975
976 serio_set_drv(serio, NULL);
977 }
978 EXPORT_SYMBOL(serio_close);
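/*
 * Sketch of a typical connect()/disconnect() pair (hypothetical driver): the
 * connect handler claims the port with serio_open() before doing any I/O,
 * and the disconnect handler releases it with serio_close():
 *
 *	static int example_connect(struct serio *serio, struct serio_driver *drv)
 *	{
 *		int err = serio_open(serio, drv);
 *
 *		if (err)
 *			return err;
 *
 *		(set up and register the input device here)
 *		return 0;
 *	}
 *
 *	static void example_disconnect(struct serio *serio)
 *	{
 *		(unregister the input device here)
 *		serio_close(serio);
 *	}
 */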
979
980 irqreturn_t serio_interrupt(struct serio *serio,
981 unsigned char data, unsigned int dfl)
982 {
983 unsigned long flags;
984 irqreturn_t ret = IRQ_NONE;
985
986 spin_lock_irqsave(&serio->lock, flags);
987
988 if (likely(serio->drv)) {
989 ret = serio->drv->interrupt(serio, data, dfl);
990 } else if (!dfl && serio->registered) {
991 serio_rescan(serio);
992 ret = IRQ_HANDLED;
993 }
994
995 spin_unlock_irqrestore(&serio->lock, flags);
996
997 return ret;
998 }
999 EXPORT_SYMBOL(serio_interrupt);
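/*
 * Sketch of the producer side (hypothetical port driver): a low-level port
 * driver calls serio_interrupt() from its own interrupt handler for every
 * byte received, passing flags such as SERIO_PARITY or SERIO_TIMEOUT in
 * dfl when the hardware reported a fault:
 *
 *	static irqreturn_t example_port_irq(int irq, void *dev_id)
 *	{
 *		struct serio *serio = dev_id;
 *		unsigned char data = example_read_data_register();
 *		unsigned int dfl = example_parity_error() ? SERIO_PARITY : 0;
 *
 *		return serio_interrupt(serio, data, dfl);
 *	}
 */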
1000
1001 static struct bus_type serio_bus = {
1002 .name = "serio",
1003 .dev_attrs = serio_device_attrs,
1004 .drv_attrs = serio_driver_attrs,
1005 .match = serio_bus_match,
1006 .uevent = serio_uevent,
1007 .probe = serio_driver_probe,
1008 .remove = serio_driver_remove,
1009 .shutdown = serio_shutdown,
1010 #ifdef CONFIG_PM
1011 .pm = &serio_pm_ops,
1012 #endif
1013 };
1014
1015 static int __init serio_init(void)
1016 {
1017 int error;
1018
1019 error = bus_register(&serio_bus);
1020 if (error) {
1021 printk(KERN_ERR "serio: failed to register serio bus, error: %d\n", error);
1022 return error;
1023 }
1024
1025 serio_task = kthread_run(serio_thread, NULL, "kseriod");
1026 if (IS_ERR(serio_task)) {
1027 bus_unregister(&serio_bus);
1028 error = PTR_ERR(serio_task);
1029 printk(KERN_ERR "serio: Failed to start kseriod, error: %d\n", error);
1030 return error;
1031 }
1032
1033 return 0;
1034 }
1035
1036 static void __exit serio_exit(void)
1037 {
1038 bus_unregister(&serio_bus);
1039 kthread_stop(serio_task);
1040 }
1041
1042 subsys_initcall(serio_init);
1043 module_exit(serio_exit);