drivers/s390/cio/css.c
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

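/*
 * Call fn for every possible subchannel id on every available subchannel
 * set, stopping as soon as fn returns a non-zero value.
 */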
int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

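/*
 * Two-stage iteration: first call fn_known for every subchannel already
 * registered on the css bus, then fn_unknown for every remaining
 * subchannel id. Falls back to a brute-force scan of all ids if no idset
 * can be allocated.
 */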
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                                                 void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}

static void css_sch_todo(struct work_struct *work);

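/*
 * Allocate a subchannel structure for schid and validate it with
 * cio_validate_subchannel(); returns an ERR_PTR on failure.
 */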
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        INIT_WORK(&sch->todo_work, css_sch_todo);
        return sch;
}

static void
css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(sch->lock);
        /* Perform todo. */
        if (todo == SCH_TODO_UNREG)
                css_sch_device_unregister(sch);
        /* Release workqueue ref. */
        put_device(&sch->dev);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}

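/*
 * Derive a minimal subchannel description from the PMCW. Used when no
 * CHSC data is available, e.g. for the console subchannel or when
 * chsc_get_ssd_info() fails.
 */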
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition was successful.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        return PTR_ERR(sch);
        }
        ret = css_register_subchannel(sch);
        if (ret) {
                if (!cio_is_console(schid))
                        put_device(&sch->dev);
        }
        return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
                      schid.sch_no);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        atomic_set(&css_eval_scheduled, 0);
        init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

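/*
 * Slow path callbacks: evaluate the subchannels that have been collected
 * in slow_subchannel_set, removing each id from the set before evaluation.
 */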
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

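/*
 * Mark a single subchannel for evaluation and kick the slow path worker.
 */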
void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        idset_sch_del(set, sch->schid);
        return 0;
}

void css_schedule_eval_all_unreg(void)
{
        unsigned long flags;
        struct idset *unreg_set;

        /* Find unregistered subchannels. */
        unreg_set = idset_sch_new();
        if (!unreg_set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(unreg_set);
        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, unreg_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;

}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

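/*
 * Initialize one channel_subsystem structure, including its "defunct"
 * pseudo subchannel and the global path group id.
 */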
static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        void *secm_area;
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 0, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 1, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                /* search for subchannels, which appeared during hibernation */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;

}
static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
                goto out;

        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret) {
                        put_device(&css->pseudo_subchannel->dev);
                        goto out_file;
                }
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_CSS);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;
        int i;

        for (i = 0; i <= __MAX_CSSID; i++) {
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device, &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_CSS);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);

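/*
 * Helper for css_complete_work(): ask each registered css driver to
 * settle its outstanding work.
 */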
static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}


/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
        chsc_enable_facility(CHSC_SDA_OC_MSS);
}

#ifdef CONFIG_PROC_FS
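/*
 * Writing to /proc/cio_settle blocks until all pending channel report
 * words have been processed and subchannel evaluation has completed.
 */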
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRW's. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
        .open = nonseekable_open,
        .write = cio_settle_write,
        .llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL,
                            &cio_settle_proc_fops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

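/*
 * Match subchannels to css drivers by subchannel type, using the driver's
 * css_device_id table.
 */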
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

struct bus_type css_bus_type = {
        .name = "css",
        .match = css_bus_match,
        .probe = css_probe,
        .remove = css_remove,
        .shutdown = css_shutdown,
        .uevent = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.name = cdrv->name;
        cdrv->drv.bus = &css_bus_type;
        cdrv->drv.owner = cdrv->owner;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);