drivers/s390/crypto/ap_bus.c
1 /*
2 * Copyright IBM Corp. 2006, 2012
3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>
5 * Ralph Wuerthner <rwuerthn@de.ibm.com>
6 * Felix Beck <felix.beck@de.ibm.com>
7 * Holger Dengler <hd@linux.vnet.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26 #define KMSG_COMPONENT "ap"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28
29 #include <linux/kernel_stat.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/slab.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/mutex.h>
40 #include <asm/reset.h>
41 #include <asm/airq.h>
42 #include <linux/atomic.h>
43 #include <asm/isc.h>
44 #include <linux/hrtimer.h>
45 #include <linux/ktime.h>
46 #include <asm/facility.h>
47
48 #include "ap_bus.h"
49
50 /* Some prototypes. */
51 static void ap_scan_bus(struct work_struct *);
52 static void ap_poll_all(unsigned long);
53 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
54 static int ap_poll_thread_start(void);
55 static void ap_poll_thread_stop(void);
56 static void ap_request_timeout(unsigned long);
57 static inline void ap_schedule_poll_timer(void);
58 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
59 static int ap_device_remove(struct device *dev);
60 static int ap_device_probe(struct device *dev);
61 static void ap_interrupt_handler(struct airq_struct *airq);
62 static void ap_reset(struct ap_device *ap_dev);
63 static void ap_config_timeout(unsigned long ptr);
64 static int ap_select_domain(void);
65 static void ap_query_configuration(void);
66
67 /*
68 * Module description.
69 */
70 MODULE_AUTHOR("IBM Corporation");
71 MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
72 "Copyright IBM Corp. 2006, 2012");
73 MODULE_LICENSE("GPL");
74 MODULE_ALIAS("z90crypt");
75
76 /*
77 * Module parameter
78 */
79 int ap_domain_index = -1; /* Adjunct Processor Domain Index */
80 module_param_named(domain, ap_domain_index, int, 0000);
81 MODULE_PARM_DESC(domain, "domain index for ap devices");
82 EXPORT_SYMBOL(ap_domain_index);
83
84 static int ap_thread_flag = 0;
85 module_param_named(poll_thread, ap_thread_flag, int, 0000);
86 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
87
88 static struct device *ap_root_device = NULL;
89 static struct ap_config_info *ap_configuration;
90 static DEFINE_SPINLOCK(ap_device_list_lock);
91 static LIST_HEAD(ap_device_list);
92
93 /*
94 * Workqueue & timer for bus rescan.
95 */
96 static struct workqueue_struct *ap_work_queue;
97 static struct timer_list ap_config_timer;
98 static int ap_config_time = AP_CONFIG_TIME;
99 static DECLARE_WORK(ap_config_work, ap_scan_bus);
100
101 /*
102 * Tasklet & timer for AP request polling and interrupts
103 */
104 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
105 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
106 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
107 static struct task_struct *ap_poll_kthread = NULL;
108 static DEFINE_MUTEX(ap_poll_thread_mutex);
109 static DEFINE_SPINLOCK(ap_poll_timer_lock);
110 static struct hrtimer ap_poll_timer;
111 /* In an LPAR, poll with a frequency of 4kHz, i.e. every 250000 nanoseconds.
112  * Under z/VM the timeout is changed to 1500000 nanoseconds to match the z/VM polling rate. */
113 static unsigned long long poll_timeout = 250000;
114
115 /* Suspend flag */
116 static int ap_suspend_flag;
117 /* Flag to check if the domain was set through the module parameter domain=.
118  * This is important when suspend and resume are done in a z/VM environment
119  * where the domain might change. */
120 static int user_set_domain = 0;
121 static struct bus_type ap_bus_type;
122
123 /* Adapter interrupt definitions */
124 static int ap_airq_flag;
125
126 static struct airq_struct ap_airq = {
127 .handler = ap_interrupt_handler,
128 .isc = AP_ISC,
129 };
130
131 /**
132 * ap_using_interrupts() - Returns non-zero if interrupt support is
133 * available.
134 */
135 static inline int ap_using_interrupts(void)
136 {
137 return ap_airq_flag;
138 }
139
140 /**
142  * ap_instructions_available() - Test if AP instructions are available.
142 *
143 * Returns 0 if the AP instructions are installed.
144 */
145 static inline int ap_instructions_available(void)
146 {
147 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
148 register unsigned long reg1 asm ("1") = -ENODEV;
149 register unsigned long reg2 asm ("2") = 0UL;
150
151 asm volatile(
152 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
153 "0: la %1,0\n"
154 "1:\n"
155 EX_TABLE(0b, 1b)
156 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
157 return reg1;
158 }
159
160 /**
161 * ap_interrupts_available(): Test if AP interrupts are available.
162 *
163 * Returns 1 if AP interrupts are available.
164 */
165 static int ap_interrupts_available(void)
166 {
167 return test_facility(2) && test_facility(65);
168 }
169
170 /**
171 * ap_configuration_available(): Test if AP configuration
172 * information is available.
173 *
174 * Returns 1 if AP configuration information is available.
175 */
176 #ifdef CONFIG_64BIT
177 static int ap_configuration_available(void)
178 {
179 return test_facility(2) && test_facility(12);
180 }
181 #endif
182
183 /**
184 * ap_test_queue(): Test adjunct processor queue.
185 * @qid: The AP queue number
186 * @queue_depth: Pointer to queue depth value
187 * @device_type: Pointer to device type value
188 *
189 * Returns AP queue status structure.
190 */
191 static inline struct ap_queue_status
192 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
193 {
194 register unsigned long reg0 asm ("0") = qid;
195 register struct ap_queue_status reg1 asm ("1");
196 register unsigned long reg2 asm ("2") = 0UL;
197
198 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
199 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
200 *device_type = (int) (reg2 >> 24);
201 *queue_depth = (int) (reg2 & 0xff);
202 return reg1;
203 }
204
205 /**
206 * ap_reset_queue(): Reset adjunct processor queue.
207 * @qid: The AP queue number
208 *
209 * Returns AP queue status structure.
210 */
211 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
212 {
213 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
214 register struct ap_queue_status reg1 asm ("1");
215 register unsigned long reg2 asm ("2") = 0UL;
216
217 asm volatile(
218 ".long 0xb2af0000" /* PQAP(RAPQ) */
219 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
220 return reg1;
221 }
222
223 #ifdef CONFIG_64BIT
224 /**
225 * ap_queue_interruption_control(): Enable interruption for a specific AP.
226 * @qid: The AP queue number
227 * @ind: The notification indicator byte
228 *
229 * Returns AP queue status.
230 */
231 static inline struct ap_queue_status
232 ap_queue_interruption_control(ap_qid_t qid, void *ind)
233 {
234 register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
235 register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
236 register struct ap_queue_status reg1_out asm ("1");
237 register void *reg2 asm ("2") = ind;
238 asm volatile(
239 ".long 0xb2af0000" /* PQAP(AQIC) */
240 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
241 :
242 : "cc" );
243 return reg1_out;
244 }
245 #endif
246
247 #ifdef CONFIG_64BIT
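/**
 * __ap_query_functions(): Query the function facility bits of an AP queue.
 * @qid: The AP queue number
 * @functions: Pointer that receives the facility bits
 *
 * Executes PQAP(TAPQ) with the facilities-query bit set in GR0 and extracts
 * the facility bits from the upper half of GR2. Returns the AP queue status;
 * the status is pre-initialized to AP_QUEUE_STATUS_INVALID so that a faulting
 * instruction can be detected by the caller.
 */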
248 static inline struct ap_queue_status
249 __ap_query_functions(ap_qid_t qid, unsigned int *functions)
250 {
251 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
252 register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
253 register unsigned long reg2 asm ("2");
254
255 asm volatile(
256 ".long 0xb2af0000\n" /* PQAP(TAPQ) */
257 "0:\n"
258 EX_TABLE(0b, 0b)
259 : "+d" (reg0), "+d" (reg1), "=d" (reg2)
260 :
261 : "cc");
262
263 *functions = (unsigned int)(reg2 >> 32);
264 return reg1;
265 }
266 #endif
267
268 #ifdef CONFIG_64BIT
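/**
 * __ap_query_configuration(): Read the AP configuration information.
 * @config: Pointer to an ap_config_info block to be filled
 *
 * Executes PQAP(QCI). Returns 0 on success or -EINVAL if the
 * instruction is not available.
 */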
269 static inline int __ap_query_configuration(struct ap_config_info *config)
270 {
271 register unsigned long reg0 asm ("0") = 0x04000000UL;
272 register unsigned long reg1 asm ("1") = -EINVAL;
273 register unsigned char *reg2 asm ("2") = (unsigned char *)config;
274
275 asm volatile(
276 ".long 0xb2af0000\n" /* PQAP(QCI) */
277 "0: la %1,0\n"
278 "1:\n"
279 EX_TABLE(0b, 1b)
280 : "+d" (reg0), "+d" (reg1), "+d" (reg2)
281 :
282 : "cc");
283
284 return reg1;
285 }
286 #endif
287
288 /**
289 * ap_query_functions(): Query supported functions.
290 * @qid: The AP queue number
291 * @functions: Pointer to functions field.
292 *
293 * Returns
294 * 0 on success.
295 * -ENODEV if queue not valid.
296 * -EBUSY if device busy.
297 * -EINVAL if query function is not supported
298 */
299 static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
300 {
301 #ifdef CONFIG_64BIT
302 struct ap_queue_status status;
303 int i;
304 status = __ap_query_functions(qid, functions);
305
306 for (i = 0; i < AP_MAX_RESET; i++) {
307 if (ap_queue_status_invalid_test(&status))
308 return -ENODEV;
309
310 switch (status.response_code) {
311 case AP_RESPONSE_NORMAL:
312 return 0;
313 case AP_RESPONSE_RESET_IN_PROGRESS:
314 case AP_RESPONSE_BUSY:
315 break;
316 case AP_RESPONSE_Q_NOT_AVAIL:
317 case AP_RESPONSE_DECONFIGURED:
318 case AP_RESPONSE_CHECKSTOPPED:
319 case AP_RESPONSE_INVALID_ADDRESS:
320 return -ENODEV;
321 case AP_RESPONSE_OTHERWISE_CHANGED:
322 break;
323 default:
324 break;
325 }
326 if (i < AP_MAX_RESET - 1) {
327 udelay(5);
328 status = __ap_query_functions(qid, functions);
329 }
330 }
331 return -EBUSY;
332 #else
333 return -EINVAL;
334 #endif
335 }
336
337 /**
338 * ap_queue_enable_interruption(): Enable interruption on an AP.
339 * @qid: The AP queue number
340 * @ind: the notification indicator byte
341 *
342  * Enables interruption on an AP queue via ap_queue_interruption_control().
343  * Based on the return value it waits a while and then uses ap_test_queue()
344  * to check whether interrupts have been switched on.
345 */
346 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
347 {
348 #ifdef CONFIG_64BIT
349 struct ap_queue_status status;
350 int t_depth, t_device_type, rc, i;
351
352 rc = -EBUSY;
353 status = ap_queue_interruption_control(qid, ind);
354
355 for (i = 0; i < AP_MAX_RESET; i++) {
356 switch (status.response_code) {
357 case AP_RESPONSE_NORMAL:
358 if (status.int_enabled)
359 return 0;
360 break;
361 case AP_RESPONSE_RESET_IN_PROGRESS:
362 case AP_RESPONSE_BUSY:
363 if (i < AP_MAX_RESET - 1) {
364 udelay(5);
365 status = ap_queue_interruption_control(qid,
366 ind);
367 continue;
368 }
369 break;
370 case AP_RESPONSE_Q_NOT_AVAIL:
371 case AP_RESPONSE_DECONFIGURED:
372 case AP_RESPONSE_CHECKSTOPPED:
373 case AP_RESPONSE_INVALID_ADDRESS:
374 return -ENODEV;
375 case AP_RESPONSE_OTHERWISE_CHANGED:
376 if (status.int_enabled)
377 return 0;
378 break;
379 default:
380 break;
381 }
382 if (i < AP_MAX_RESET - 1) {
383 udelay(5);
384 status = ap_test_queue(qid, &t_depth, &t_device_type);
385 }
386 }
387 return rc;
388 #else
389 return -EINVAL;
390 #endif
391 }
392
393 /**
394 * __ap_send(): Send message to adjunct processor queue.
395 * @qid: The AP queue number
396 * @psmid: The program supplied message identifier
397 * @msg: The message text
398 * @length: The message length
399 * @special: Special Bit
400 *
401 * Returns AP queue status structure.
402 * Condition code 1 on NQAP can't happen because the L bit is 1.
403 * Condition code 2 on NQAP also means the send is incomplete,
404 * because a segment boundary was reached. The NQAP is repeated.
405 */
406 static inline struct ap_queue_status
407 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
408 unsigned int special)
409 {
410 typedef struct { char _[length]; } msgblock;
411 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
412 register struct ap_queue_status reg1 asm ("1");
413 register unsigned long reg2 asm ("2") = (unsigned long) msg;
414 register unsigned long reg3 asm ("3") = (unsigned long) length;
415 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
416 register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
417
418 if (special == 1)
419 reg0 |= 0x400000UL;
420
421 asm volatile (
422 "0: .long 0xb2ad0042\n" /* NQAP */
423 " brc 2,0b"
424 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
425 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
426 : "cc" );
427 return reg1;
428 }
429
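/**
 * ap_send(): Send an AP message to an AP queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Thin wrapper around __ap_send() that maps the AP queue status
 * to 0, -EBUSY, -EINVAL or -ENODEV.
 */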
430 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
431 {
432 struct ap_queue_status status;
433
434 status = __ap_send(qid, psmid, msg, length, 0);
435 switch (status.response_code) {
436 case AP_RESPONSE_NORMAL:
437 return 0;
438 case AP_RESPONSE_Q_FULL:
439 case AP_RESPONSE_RESET_IN_PROGRESS:
440 return -EBUSY;
441 case AP_RESPONSE_REQ_FAC_NOT_INST:
442 return -EINVAL;
443 default: /* Device is gone. */
444 return -ENODEV;
445 }
446 }
447 EXPORT_SYMBOL(ap_send);
448
449 /**
450 * __ap_recv(): Receive message from adjunct processor queue.
451 * @qid: The AP queue number
452 * @psmid: Pointer to program supplied message identifier
453 * @msg: The message text
454 * @length: The message length
455 *
456 * Returns AP queue status structure.
457 * Condition code 1 on DQAP means the receive has taken place
458 * but only partially. The response is incomplete, hence the
459 * DQAP is repeated.
460 * Condition code 2 on DQAP also means the receive is incomplete,
461 * this time because a segment boundary was reached. Again, the
462 * DQAP is repeated.
463 * Note that gpr2 is used by the DQAP instruction to keep track of
464 * any 'residual' length, in case the instruction gets interrupted.
465 * Hence it gets zeroed before the instruction.
466 */
467 static inline struct ap_queue_status
468 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
469 {
470 typedef struct { char _[length]; } msgblock;
471 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
472 register struct ap_queue_status reg1 asm ("1");
473 register unsigned long reg2 asm("2") = 0UL;
474 register unsigned long reg4 asm("4") = (unsigned long) msg;
475 register unsigned long reg5 asm("5") = (unsigned long) length;
476 register unsigned long reg6 asm("6") = 0UL;
477 register unsigned long reg7 asm("7") = 0UL;
478
479
480 asm volatile(
481 "0: .long 0xb2ae0064\n" /* DQAP */
482 " brc 6,0b\n"
483 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
484 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
485 "=m" (*(msgblock *) msg) : : "cc" );
486 *psmid = (((unsigned long long) reg6) << 32) + reg7;
487 return reg1;
488 }
489
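/**
 * ap_recv(): Receive an AP message from an AP queue.
 * @qid: The AP queue number
 * @psmid: Pointer to the program supplied message identifier
 * @msg: The message buffer
 * @length: The buffer length
 *
 * Thin wrapper around __ap_recv() that maps the AP queue status
 * to 0, -ENOENT, -EBUSY or -ENODEV.
 */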
490 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
491 {
492 struct ap_queue_status status;
493
494 status = __ap_recv(qid, psmid, msg, length);
495 switch (status.response_code) {
496 case AP_RESPONSE_NORMAL:
497 return 0;
498 case AP_RESPONSE_NO_PENDING_REPLY:
499 if (status.queue_empty)
500 return -ENOENT;
501 return -EBUSY;
502 case AP_RESPONSE_RESET_IN_PROGRESS:
503 return -EBUSY;
504 default:
505 return -ENODEV;
506 }
507 }
508 EXPORT_SYMBOL(ap_recv);
509
510 /**
511 * ap_query_queue(): Check if an AP queue is available.
512 * @qid: The AP queue number
513 * @queue_depth: Pointer to queue depth value
514 * @device_type: Pointer to device type value
515 *
516 * The test is repeated for AP_MAX_RESET times.
517 */
518 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
519 {
520 struct ap_queue_status status;
521 int t_depth, t_device_type, rc, i;
522
523 rc = -EBUSY;
524 for (i = 0; i < AP_MAX_RESET; i++) {
525 status = ap_test_queue(qid, &t_depth, &t_device_type);
526 switch (status.response_code) {
527 case AP_RESPONSE_NORMAL:
528 *queue_depth = t_depth + 1;
529 *device_type = t_device_type;
530 rc = 0;
531 break;
532 case AP_RESPONSE_Q_NOT_AVAIL:
533 rc = -ENODEV;
534 break;
535 case AP_RESPONSE_RESET_IN_PROGRESS:
536 break;
537 case AP_RESPONSE_DECONFIGURED:
538 rc = -ENODEV;
539 break;
540 case AP_RESPONSE_CHECKSTOPPED:
541 rc = -ENODEV;
542 break;
543 case AP_RESPONSE_INVALID_ADDRESS:
544 rc = -ENODEV;
545 break;
546 case AP_RESPONSE_OTHERWISE_CHANGED:
547 break;
548 case AP_RESPONSE_BUSY:
549 break;
550 default:
551 BUG();
552 }
553 if (rc != -EBUSY)
554 break;
555 if (i < AP_MAX_RESET - 1)
556 udelay(5);
557 }
558 return rc;
559 }
560
561 /**
562 * ap_init_queue(): Reset an AP queue.
563 * @qid: The AP queue number
564 *
565 * Reset an AP queue and wait for it to become available again.
566 */
567 static int ap_init_queue(ap_qid_t qid)
568 {
569 struct ap_queue_status status;
570 int rc, dummy, i;
571
572 rc = -ENODEV;
573 status = ap_reset_queue(qid);
574 for (i = 0; i < AP_MAX_RESET; i++) {
575 switch (status.response_code) {
576 case AP_RESPONSE_NORMAL:
577 if (status.queue_empty)
578 rc = 0;
579 break;
580 case AP_RESPONSE_Q_NOT_AVAIL:
581 case AP_RESPONSE_DECONFIGURED:
582 case AP_RESPONSE_CHECKSTOPPED:
583 i = AP_MAX_RESET; /* return with -ENODEV */
584 break;
585 case AP_RESPONSE_RESET_IN_PROGRESS:
586 rc = -EBUSY;
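		/* fall through */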
587 case AP_RESPONSE_BUSY:
588 default:
589 break;
590 }
591 if (rc != -ENODEV && rc != -EBUSY)
592 break;
593 if (i < AP_MAX_RESET - 1) {
594 /* Time we wait until we give up (0.7 sec * 90).
595  * Since the actual request (in progress) will not be
596  * interrupted immediately by the reset command,
597  * we have to be patient. In the worst case we have to
598  * wait 60 sec plus the reset time (a few msec).
599 */
600 schedule_timeout(AP_RESET_TIMEOUT);
601 status = ap_test_queue(qid, &dummy, &dummy);
602 }
603 }
604 if (rc == 0 && ap_using_interrupts()) {
605 rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
606 /* If interruption mode is supported by the machine,
607  * but an AP cannot be enabled for interruption, then
608 * the AP will be discarded. */
609 if (rc)
610 pr_err("Registering adapter interrupts for "
611 "AP %d failed\n", AP_QID_DEVICE(qid));
612 }
613 return rc;
614 }
615
616 /**
617 * ap_increase_queue_count(): Arm request timeout.
618 * @ap_dev: Pointer to an AP device.
619 *
620 * Arm request timeout if an AP device was idle and a new request is submitted.
621 */
622 static void ap_increase_queue_count(struct ap_device *ap_dev)
623 {
624 int timeout = ap_dev->drv->request_timeout;
625
626 ap_dev->queue_count++;
627 if (ap_dev->queue_count == 1) {
628 mod_timer(&ap_dev->timeout, jiffies + timeout);
629 ap_dev->reset = AP_RESET_ARMED;
630 }
631 }
632
633 /**
634 * ap_decrease_queue_count(): Decrease queue count.
635 * @ap_dev: Pointer to an AP device.
636 *
637 * If AP device is still alive, re-schedule request timeout if there are still
638 * pending requests.
639 */
640 static void ap_decrease_queue_count(struct ap_device *ap_dev)
641 {
642 int timeout = ap_dev->drv->request_timeout;
643
644 ap_dev->queue_count--;
645 if (ap_dev->queue_count > 0)
646 mod_timer(&ap_dev->timeout, jiffies + timeout);
647 else
648 /*
649  * The timeout timer should be disabled now - since
650 * del_timer_sync() is very expensive, we just tell via the
651 * reset flag to ignore the pending timeout timer.
652 */
653 ap_dev->reset = AP_RESET_IGNORE;
654 }
655
656 /*
657 * AP device related attributes.
658 */
659 static ssize_t ap_hwtype_show(struct device *dev,
660 struct device_attribute *attr, char *buf)
661 {
662 struct ap_device *ap_dev = to_ap_dev(dev);
663 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
664 }
665
666 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
667 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
668 char *buf)
669 {
670 struct ap_device *ap_dev = to_ap_dev(dev);
671 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
672 }
673
674 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
675 static ssize_t ap_request_count_show(struct device *dev,
676 struct device_attribute *attr,
677 char *buf)
678 {
679 struct ap_device *ap_dev = to_ap_dev(dev);
680 int rc;
681
682 spin_lock_bh(&ap_dev->lock);
683 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
684 spin_unlock_bh(&ap_dev->lock);
685 return rc;
686 }
687
688 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
689
690 static ssize_t ap_requestq_count_show(struct device *dev,
691 struct device_attribute *attr, char *buf)
692 {
693 struct ap_device *ap_dev = to_ap_dev(dev);
694 int rc;
695
696 spin_lock_bh(&ap_dev->lock);
697 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
698 spin_unlock_bh(&ap_dev->lock);
699 return rc;
700 }
701
702 static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
703
704 static ssize_t ap_pendingq_count_show(struct device *dev,
705 struct device_attribute *attr, char *buf)
706 {
707 struct ap_device *ap_dev = to_ap_dev(dev);
708 int rc;
709
710 spin_lock_bh(&ap_dev->lock);
711 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
712 spin_unlock_bh(&ap_dev->lock);
713 return rc;
714 }
715
716 static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
717
718 static ssize_t ap_modalias_show(struct device *dev,
719 struct device_attribute *attr, char *buf)
720 {
721 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
722 }
723
724 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
725
726 static ssize_t ap_functions_show(struct device *dev,
727 struct device_attribute *attr, char *buf)
728 {
729 struct ap_device *ap_dev = to_ap_dev(dev);
730 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
731 }
732
733 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
734
735 static struct attribute *ap_dev_attrs[] = {
736 &dev_attr_hwtype.attr,
737 &dev_attr_depth.attr,
738 &dev_attr_request_count.attr,
739 &dev_attr_requestq_count.attr,
740 &dev_attr_pendingq_count.attr,
741 &dev_attr_modalias.attr,
742 &dev_attr_ap_functions.attr,
743 NULL
744 };
745 static struct attribute_group ap_dev_attr_group = {
746 .attrs = ap_dev_attrs
747 };
748
749 /**
750 * ap_bus_match()
751 * @dev: Pointer to device
752 * @drv: Pointer to device_driver
753 *
754  * Match an AP device against the supported device types of an AP driver.
755 */
756 static int ap_bus_match(struct device *dev, struct device_driver *drv)
757 {
758 struct ap_device *ap_dev = to_ap_dev(dev);
759 struct ap_driver *ap_drv = to_ap_drv(drv);
760 struct ap_device_id *id;
761
762 /*
763 * Compare device type of the device with the list of
764 * supported types of the device_driver.
765 */
766 for (id = ap_drv->ids; id->match_flags; id++) {
767 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
768 (id->dev_type != ap_dev->device_type))
769 continue;
770 return 1;
771 }
772 return 0;
773 }
774
775 /**
776 * ap_uevent(): Uevent function for AP devices.
777 * @dev: Pointer to device
778 * @env: Pointer to kobj_uevent_env
779 *
780  * Sets up the uevent environment variables DEV_TYPE and MODALIAS
781  * for the hardware device type.
782 */
783 static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
784 {
785 struct ap_device *ap_dev = to_ap_dev(dev);
786 int retval = 0;
787
788 if (!ap_dev)
789 return -ENODEV;
790
791 /* Set up DEV_TYPE environment variable. */
792 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
793 if (retval)
794 return retval;
795
796 /* Add MODALIAS= */
797 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
798
799 return retval;
800 }
801
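/**
 * ap_bus_suspend(): Suspend callback for AP devices.
 * @dev: Pointer to the device
 * @state: Power management message (unused)
 *
 * On the first invocation the bus rescan timer, work queue and poll
 * tasklet are stopped. The device is then polled until all outstanding
 * requests have finished and is finally marked as unregistered.
 */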
802 static int ap_bus_suspend(struct device *dev, pm_message_t state)
803 {
804 struct ap_device *ap_dev = to_ap_dev(dev);
805 unsigned long flags;
806
807 if (!ap_suspend_flag) {
808 ap_suspend_flag = 1;
809
810 		/* Disable scanning for devices, so that they are not
811 		 * rediscovered after they have been removed.
812 */
813 del_timer_sync(&ap_config_timer);
814 if (ap_work_queue != NULL) {
815 destroy_workqueue(ap_work_queue);
816 ap_work_queue = NULL;
817 }
818
819 tasklet_disable(&ap_tasklet);
820 }
821 /* Poll on the device until all requests are finished. */
822 do {
823 flags = 0;
824 spin_lock_bh(&ap_dev->lock);
825 __ap_poll_device(ap_dev, &flags);
826 spin_unlock_bh(&ap_dev->lock);
827 } while ((flags & 1) || (flags & 2));
828
829 spin_lock_bh(&ap_dev->lock);
830 ap_dev->unregistered = 1;
831 spin_unlock_bh(&ap_dev->lock);
832
833 return 0;
834 }
835
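/**
 * ap_bus_resume(): Resume callback for AP devices.
 * @dev: Pointer to the device
 *
 * On the first invocation interrupt or polling mode is re-established,
 * the configuration is re-read, the domain is re-selected unless it was
 * set by the user, and the rescan timer, work queue and tasklet are
 * restarted. The qid of the device is adjusted to the current domain
 * and a bus rescan is queued.
 */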
836 static int ap_bus_resume(struct device *dev)
837 {
838 struct ap_device *ap_dev = to_ap_dev(dev);
839 int rc;
840
841 if (ap_suspend_flag) {
842 ap_suspend_flag = 0;
843 if (ap_interrupts_available()) {
844 if (!ap_using_interrupts()) {
845 rc = register_adapter_interrupt(&ap_airq);
846 ap_airq_flag = (rc == 0);
847 }
848 } else {
849 if (ap_using_interrupts()) {
850 unregister_adapter_interrupt(&ap_airq);
851 ap_airq_flag = 0;
852 }
853 }
854 ap_query_configuration();
855 if (!user_set_domain) {
856 ap_domain_index = -1;
857 ap_select_domain();
858 }
859 init_timer(&ap_config_timer);
860 ap_config_timer.function = ap_config_timeout;
861 ap_config_timer.data = 0;
862 ap_config_timer.expires = jiffies + ap_config_time * HZ;
863 add_timer(&ap_config_timer);
864 ap_work_queue = create_singlethread_workqueue("kapwork");
865 if (!ap_work_queue)
866 return -ENOMEM;
867 tasklet_enable(&ap_tasklet);
868 if (!ap_using_interrupts())
869 ap_schedule_poll_timer();
870 else
871 tasklet_schedule(&ap_tasklet);
872 if (ap_thread_flag)
873 rc = ap_poll_thread_start();
874 else
875 rc = 0;
876 } else
877 rc = 0;
878 if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
879 spin_lock_bh(&ap_dev->lock);
880 ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
881 ap_domain_index);
882 spin_unlock_bh(&ap_dev->lock);
883 }
884 queue_work(ap_work_queue, &ap_config_work);
885
886 return rc;
887 }
888
889 static struct bus_type ap_bus_type = {
890 .name = "ap",
891 .match = &ap_bus_match,
892 .uevent = &ap_uevent,
893 .suspend = ap_bus_suspend,
894 .resume = ap_bus_resume
895 };
896
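/**
 * ap_device_probe(): Probe callback of the AP bus.
 * @dev: Pointer to the device
 *
 * Calls the probe function of the matching AP driver and, on success,
 * adds the device to the global AP device list.
 */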
897 static int ap_device_probe(struct device *dev)
898 {
899 struct ap_device *ap_dev = to_ap_dev(dev);
900 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
901 int rc;
902
903 ap_dev->drv = ap_drv;
904 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
905 if (!rc) {
906 spin_lock_bh(&ap_device_list_lock);
907 list_add(&ap_dev->list, &ap_device_list);
908 spin_unlock_bh(&ap_device_list_lock);
909 }
910 return rc;
911 }
912
913 /**
914 * __ap_flush_queue(): Flush requests.
915 * @ap_dev: Pointer to the AP device
916 *
917 * Flush all requests from the request/pending queue of an AP device.
918 */
919 static void __ap_flush_queue(struct ap_device *ap_dev)
920 {
921 struct ap_message *ap_msg, *next;
922
923 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
924 list_del_init(&ap_msg->list);
925 ap_dev->pendingq_count--;
926 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
927 }
928 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
929 list_del_init(&ap_msg->list);
930 ap_dev->requestq_count--;
931 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
932 }
933 }
934
935 void ap_flush_queue(struct ap_device *ap_dev)
936 {
937 spin_lock_bh(&ap_dev->lock);
938 __ap_flush_queue(ap_dev);
939 spin_unlock_bh(&ap_dev->lock);
940 }
941 EXPORT_SYMBOL(ap_flush_queue);
942
943 static int ap_device_remove(struct device *dev)
944 {
945 struct ap_device *ap_dev = to_ap_dev(dev);
946 struct ap_driver *ap_drv = ap_dev->drv;
947
948 ap_flush_queue(ap_dev);
949 del_timer_sync(&ap_dev->timeout);
950 spin_lock_bh(&ap_device_list_lock);
951 list_del_init(&ap_dev->list);
952 spin_unlock_bh(&ap_device_list_lock);
953 if (ap_drv->remove)
954 ap_drv->remove(ap_dev);
955 spin_lock_bh(&ap_dev->lock);
956 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
957 spin_unlock_bh(&ap_dev->lock);
958 return 0;
959 }
960
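/**
 * ap_driver_register(): Register an AP device driver.
 * @ap_drv: Pointer to the AP driver struct
 * @owner: Owning module
 * @name: Name of the driver
 *
 * Hooks up the probe and remove callbacks and registers the driver
 * with the AP bus.
 */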
961 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
962 char *name)
963 {
964 struct device_driver *drv = &ap_drv->driver;
965
966 drv->bus = &ap_bus_type;
967 drv->probe = ap_device_probe;
968 drv->remove = ap_device_remove;
969 drv->owner = owner;
970 drv->name = name;
971 return driver_register(drv);
972 }
973 EXPORT_SYMBOL(ap_driver_register);
974
975 void ap_driver_unregister(struct ap_driver *ap_drv)
976 {
977 driver_unregister(&ap_drv->driver);
978 }
979 EXPORT_SYMBOL(ap_driver_unregister);
980
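/**
 * ap_bus_force_rescan(): Force an immediate AP bus rescan.
 *
 * Re-arms the rescan timer, queues the rescan work and waits
 * for it to complete.
 */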
981 void ap_bus_force_rescan(void)
982 {
983 /* reconfigure the AP bus rescan timer. */
984 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
985 	/* process an asynchronous bus rescan */
986 queue_work(ap_work_queue, &ap_config_work);
987 flush_work(&ap_config_work);
988 }
989 EXPORT_SYMBOL(ap_bus_force_rescan);
990
991 /*
992 * AP bus attributes.
993 */
994 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
995 {
996 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
997 }
998
999 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
1000
1001 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
1002 {
1003 	if (ap_configuration != NULL) {	/* QCI info available */
1004 if (test_facility(76)) { /* format 1 - 256 bit domain field */
1005 return snprintf(buf, PAGE_SIZE,
1006 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1007 ap_configuration->adm[0], ap_configuration->adm[1],
1008 ap_configuration->adm[2], ap_configuration->adm[3],
1009 ap_configuration->adm[4], ap_configuration->adm[5],
1010 ap_configuration->adm[6], ap_configuration->adm[7]);
1011 } else { /* format 0 - 16 bit domain field */
1012 return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
1013 ap_configuration->adm[0], ap_configuration->adm[1]);
1014 }
1015 } else {
1016 return snprintf(buf, PAGE_SIZE, "not supported\n");
1017 }
1018 }
1019
1020 static BUS_ATTR(ap_control_domain_mask, 0444,
1021 ap_control_domain_mask_show, NULL);
1022
1023 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
1024 {
1025 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
1026 }
1027
1028 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
1029 {
1030 return snprintf(buf, PAGE_SIZE, "%d\n",
1031 ap_using_interrupts() ? 1 : 0);
1032 }
1033
1034 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
1035
1036 static ssize_t ap_config_time_store(struct bus_type *bus,
1037 const char *buf, size_t count)
1038 {
1039 int time;
1040
1041 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
1042 return -EINVAL;
1043 ap_config_time = time;
1044 if (!timer_pending(&ap_config_timer) ||
1045 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
1046 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1047 add_timer(&ap_config_timer);
1048 }
1049 return count;
1050 }
1051
1052 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
1053
1054 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
1055 {
1056 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
1057 }
1058
1059 static ssize_t ap_poll_thread_store(struct bus_type *bus,
1060 const char *buf, size_t count)
1061 {
1062 int flag, rc;
1063
1064 if (sscanf(buf, "%d\n", &flag) != 1)
1065 return -EINVAL;
1066 if (flag) {
1067 rc = ap_poll_thread_start();
1068 if (rc)
1069 return rc;
1070 }
1071 else
1072 ap_poll_thread_stop();
1073 return count;
1074 }
1075
1076 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
1077
1078 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
1079 {
1080 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
1081 }
1082
1083 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
1084 size_t count)
1085 {
1086 unsigned long long time;
1087 ktime_t hr_time;
1088
1089 /* 120 seconds = maximum poll interval */
1090 if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
1091 time > 120000000000ULL)
1092 return -EINVAL;
1093 poll_timeout = time;
1094 hr_time = ktime_set(0, poll_timeout);
1095
1096 if (!hrtimer_is_queued(&ap_poll_timer) ||
1097 !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
1098 hrtimer_set_expires(&ap_poll_timer, hr_time);
1099 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
1100 }
1101 return count;
1102 }
1103
1104 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
1105
1106 static struct bus_attribute *const ap_bus_attrs[] = {
1107 &bus_attr_ap_domain,
1108 &bus_attr_ap_control_domain_mask,
1109 &bus_attr_config_time,
1110 &bus_attr_poll_thread,
1111 &bus_attr_ap_interrupts,
1112 &bus_attr_poll_timeout,
1113 NULL,
1114 };
1115
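/*
 * ap_test_config(): Test a bit in an AP configuration mask.
 * @field: Pointer to the bit mask (APM or AQM)
 * @nr: Number of the bit to test (0..255)
 *
 * Returns the value of the bit or 0 if @nr is out of range.
 */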
1116 static inline int ap_test_config(unsigned int *field, unsigned int nr)
1117 {
1118 if (nr > 0xFFu)
1119 return 0;
1120 return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
1121 }
1122
1123 /*
1124 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
1125 * @id AP card ID
1126 *
1127 * Returns 0 if the card is not configured
1128 * 1 if the card is configured or
1129 * if the configuration information is not available
1130 */
1131 static inline int ap_test_config_card_id(unsigned int id)
1132 {
1133 if (!ap_configuration)
1134 return 1;
1135 return ap_test_config(ap_configuration->apm, id);
1136 }
1137
1138 /*
1139 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
1140 * @domain AP usage domain ID
1141 *
1142 * Returns 0 if the usage domain is not configured
1143 * 1 if the usage domain is configured or
1144 * if the configuration information is not available
1145 */
1146 static inline int ap_test_config_domain(unsigned int domain)
1147 {
1148 if (!ap_configuration)
1149 return 1;
1150 return ap_test_config(ap_configuration->aqm, domain);
1151 }
1152
1153 /**
1154 * ap_query_configuration(): Query AP configuration information.
1155 *
1156 * Query information of installed cards and configured domains from AP.
1157 */
1158 static void ap_query_configuration(void)
1159 {
1160 #ifdef CONFIG_64BIT
1161 if (ap_configuration_available()) {
1162 if (!ap_configuration)
1163 ap_configuration =
1164 kzalloc(sizeof(struct ap_config_info),
1165 GFP_KERNEL);
1166 if (ap_configuration)
1167 __ap_query_configuration(ap_configuration);
1168 } else
1169 ap_configuration = NULL;
1170 #else
1171 ap_configuration = NULL;
1172 #endif
1173 }
1174
1175 /**
1176 * ap_select_domain(): Select an AP domain.
1177 *
1178 * Pick one of the 16 AP domains.
1179 */
1180 static int ap_select_domain(void)
1181 {
1182 int queue_depth, device_type, count, max_count, best_domain;
1183 ap_qid_t qid;
1184 int rc, i, j;
1185
1186 /*
1187 * We want to use a single domain. Either the one specified with
1188 * the "domain=" parameter or the domain with the maximum number
1189 * of devices.
1190 */
1191 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
1192 /* Domain has already been selected. */
1193 return 0;
1194 best_domain = -1;
1195 max_count = 0;
1196 for (i = 0; i < AP_DOMAINS; i++) {
1197 if (!ap_test_config_domain(i))
1198 continue;
1199 count = 0;
1200 for (j = 0; j < AP_DEVICES; j++) {
1201 if (!ap_test_config_card_id(j))
1202 continue;
1203 qid = AP_MKQID(j, i);
1204 rc = ap_query_queue(qid, &queue_depth, &device_type);
1205 if (rc)
1206 continue;
1207 count++;
1208 }
1209 if (count > max_count) {
1210 max_count = count;
1211 best_domain = i;
1212 }
1213 }
1214 if (best_domain >= 0){
1215 ap_domain_index = best_domain;
1216 return 0;
1217 }
1218 return -ENODEV;
1219 }
1220
1221 /**
1222 * ap_probe_device_type(): Find the device type of an AP.
1223 * @ap_dev: pointer to the AP device.
1224 *
1225 * Find the device type if query queue returned a device type of 0.
1226 */
1227 static int ap_probe_device_type(struct ap_device *ap_dev)
1228 {
1229 static unsigned char msg[] = {
1230 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
1231 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1232 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
1233 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1234 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
1235 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
1236 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
1237 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
1238 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1239 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
1240 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1241 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
1242 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
1243 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1244 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
1245 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1246 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1247 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1248 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1249 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1250 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1251 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
1252 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1253 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
1254 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
1255 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
1256 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
1257 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1258 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
1259 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
1260 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
1261 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
1262 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1263 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
1264 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
1265 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
1266 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
1267 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
1268 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1269 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1270 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1271 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1272 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1273 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1274 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1275 };
1276 struct ap_queue_status status;
1277 unsigned long long psmid;
1278 char *reply;
1279 int rc, i;
1280
1281 reply = (void *) get_zeroed_page(GFP_KERNEL);
1282 if (!reply) {
1283 rc = -ENOMEM;
1284 goto out;
1285 }
1286
1287 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1288 msg, sizeof(msg), 0);
1289 if (status.response_code != AP_RESPONSE_NORMAL) {
1290 rc = -ENODEV;
1291 goto out_free;
1292 }
1293
1294 /* Wait for the test message to complete. */
1295 for (i = 0; i < 6; i++) {
1296 mdelay(300);
1297 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1298 if (status.response_code == AP_RESPONSE_NORMAL &&
1299 psmid == 0x0102030405060708ULL)
1300 break;
1301 }
1302 if (i < 6) {
1303 /* Got an answer. */
1304 if (reply[0] == 0x00 && reply[1] == 0x86)
1305 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1306 else
1307 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1308 rc = 0;
1309 } else
1310 rc = -ENODEV;
1311
1312 out_free:
1313 free_page((unsigned long) reply);
1314 out:
1315 return rc;
1316 }
1317
1318 static void ap_interrupt_handler(struct airq_struct *airq)
1319 {
1320 inc_irq_stat(IRQIO_APB);
1321 tasklet_schedule(&ap_tasklet);
1322 }
1323
1324 /**
1325  * __ap_scan_bus(): Device match callback used by the bus scan.
1326  * @dev: Pointer to device
1327  * @data: AP queue id to look for
1328  *
1329  * Returns 1 if the qid of @dev matches the qid passed in @data.
1330 */
1331 static int __ap_scan_bus(struct device *dev, void *data)
1332 {
1333 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1334 }
1335
1336 static void ap_device_release(struct device *dev)
1337 {
1338 struct ap_device *ap_dev = to_ap_dev(dev);
1339
1340 kfree(ap_dev);
1341 }
1342
1343 static void ap_scan_bus(struct work_struct *unused)
1344 {
1345 struct ap_device *ap_dev;
1346 struct device *dev;
1347 ap_qid_t qid;
1348 int queue_depth, device_type;
1349 unsigned int device_functions;
1350 int rc, i;
1351
1352 ap_query_configuration();
1353 if (ap_select_domain() != 0) {
1354 return;
1355 }
1356 for (i = 0; i < AP_DEVICES; i++) {
1357 qid = AP_MKQID(i, ap_domain_index);
1358 dev = bus_find_device(&ap_bus_type, NULL,
1359 (void *)(unsigned long)qid,
1360 __ap_scan_bus);
1361 if (ap_test_config_card_id(i))
1362 rc = ap_query_queue(qid, &queue_depth, &device_type);
1363 else
1364 rc = -ENODEV;
1365 if (dev) {
1366 if (rc == -EBUSY) {
1367 set_current_state(TASK_UNINTERRUPTIBLE);
1368 schedule_timeout(AP_RESET_TIMEOUT);
1369 rc = ap_query_queue(qid, &queue_depth,
1370 &device_type);
1371 }
1372 ap_dev = to_ap_dev(dev);
1373 spin_lock_bh(&ap_dev->lock);
1374 if (rc || ap_dev->unregistered) {
1375 spin_unlock_bh(&ap_dev->lock);
1376 if (ap_dev->unregistered)
1377 i--;
1378 device_unregister(dev);
1379 put_device(dev);
1380 continue;
1381 }
1382 spin_unlock_bh(&ap_dev->lock);
1383 put_device(dev);
1384 continue;
1385 }
1386 if (rc)
1387 continue;
1388 rc = ap_init_queue(qid);
1389 if (rc)
1390 continue;
1391 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1392 if (!ap_dev)
1393 break;
1394 ap_dev->qid = qid;
1395 ap_dev->queue_depth = queue_depth;
1396 ap_dev->unregistered = 1;
1397 spin_lock_init(&ap_dev->lock);
1398 INIT_LIST_HEAD(&ap_dev->pendingq);
1399 INIT_LIST_HEAD(&ap_dev->requestq);
1400 INIT_LIST_HEAD(&ap_dev->list);
1401 setup_timer(&ap_dev->timeout, ap_request_timeout,
1402 (unsigned long) ap_dev);
1403 switch (device_type) {
1404 case 0:
1405 /* device type probing for old cards */
1406 if (ap_probe_device_type(ap_dev)) {
1407 kfree(ap_dev);
1408 continue;
1409 }
1410 break;
1411 default:
1412 ap_dev->device_type = device_type;
1413 }
1414
1415 rc = ap_query_functions(qid, &device_functions);
1416 if (!rc)
1417 ap_dev->functions = device_functions;
1418 else
1419 ap_dev->functions = 0u;
1420
1421 ap_dev->device.bus = &ap_bus_type;
1422 ap_dev->device.parent = ap_root_device;
1423 if (dev_set_name(&ap_dev->device, "card%02x",
1424 AP_QID_DEVICE(ap_dev->qid))) {
1425 kfree(ap_dev);
1426 continue;
1427 }
1428 ap_dev->device.release = ap_device_release;
1429 rc = device_register(&ap_dev->device);
1430 if (rc) {
1431 put_device(&ap_dev->device);
1432 continue;
1433 }
1434 /* Add device attributes. */
1435 rc = sysfs_create_group(&ap_dev->device.kobj,
1436 &ap_dev_attr_group);
1437 if (!rc) {
1438 spin_lock_bh(&ap_dev->lock);
1439 ap_dev->unregistered = 0;
1440 spin_unlock_bh(&ap_dev->lock);
1441 }
1442 else
1443 device_unregister(&ap_dev->device);
1444 }
1445 }
1446
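/**
 * ap_config_timeout(): Callback of the AP bus rescan timer.
 * @ptr: Unused timer data
 *
 * Queues the bus rescan work and re-arms the rescan timer.
 */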
1447 static void
1448 ap_config_timeout(unsigned long ptr)
1449 {
1450 queue_work(ap_work_queue, &ap_config_work);
1451 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1452 add_timer(&ap_config_timer);
1453 }
1454
1455 /**
1456 * __ap_schedule_poll_timer(): Schedule poll timer.
1457 *
1458 * Set up the timer to run the poll tasklet
1459 */
1460 static inline void __ap_schedule_poll_timer(void)
1461 {
1462 ktime_t hr_time;
1463
1464 spin_lock_bh(&ap_poll_timer_lock);
1465 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1466 goto out;
1467 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1468 hr_time = ktime_set(0, poll_timeout);
1469 hrtimer_forward_now(&ap_poll_timer, hr_time);
1470 hrtimer_restart(&ap_poll_timer);
1471 }
1472 out:
1473 spin_unlock_bh(&ap_poll_timer_lock);
1474 }
1475
1476 /**
1477 * ap_schedule_poll_timer(): Schedule poll timer.
1478 *
1479 * Set up the timer to run the poll tasklet
1480 */
1481 static inline void ap_schedule_poll_timer(void)
1482 {
1483 if (ap_using_interrupts())
1484 return;
1485 __ap_schedule_poll_timer();
1486 }
1487
1488 /**
1489 * ap_poll_read(): Receive pending reply messages from an AP device.
1490 * @ap_dev: pointer to the AP device
1491 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1492 * required, bit 2^1 is set if the poll timer needs to get armed
1493 *
1494 * Returns 0 if the device is still present, -ENODEV if not.
1495 */
1496 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1497 {
1498 struct ap_queue_status status;
1499 struct ap_message *ap_msg;
1500
1501 if (ap_dev->queue_count <= 0)
1502 return 0;
1503 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1504 ap_dev->reply->message, ap_dev->reply->length);
1505 switch (status.response_code) {
1506 case AP_RESPONSE_NORMAL:
1507 atomic_dec(&ap_poll_requests);
1508 ap_decrease_queue_count(ap_dev);
1509 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1510 if (ap_msg->psmid != ap_dev->reply->psmid)
1511 continue;
1512 list_del_init(&ap_msg->list);
1513 ap_dev->pendingq_count--;
1514 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
1515 break;
1516 }
1517 if (ap_dev->queue_count > 0)
1518 *flags |= 1;
1519 break;
1520 case AP_RESPONSE_NO_PENDING_REPLY:
1521 if (status.queue_empty) {
1522 /* The card shouldn't forget requests but who knows. */
1523 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1524 ap_dev->queue_count = 0;
1525 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1526 ap_dev->requestq_count += ap_dev->pendingq_count;
1527 ap_dev->pendingq_count = 0;
1528 } else
1529 *flags |= 2;
1530 break;
1531 default:
1532 return -ENODEV;
1533 }
1534 return 0;
1535 }
1536
1537 /**
1538 * ap_poll_write(): Send messages from the request queue to an AP device.
1539 * @ap_dev: pointer to the AP device
1540 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1541 * required, bit 2^1 is set if the poll timer needs to get armed
1542 *
1543 * Returns 0 if the device is still present, -ENODEV if not.
1544 */
1545 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1546 {
1547 struct ap_queue_status status;
1548 struct ap_message *ap_msg;
1549
1550 if (ap_dev->requestq_count <= 0 ||
1551 ap_dev->queue_count >= ap_dev->queue_depth)
1552 return 0;
1553 /* Start the next request on the queue. */
1554 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1555 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1556 ap_msg->message, ap_msg->length, ap_msg->special);
1557 switch (status.response_code) {
1558 case AP_RESPONSE_NORMAL:
1559 atomic_inc(&ap_poll_requests);
1560 ap_increase_queue_count(ap_dev);
1561 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1562 ap_dev->requestq_count--;
1563 ap_dev->pendingq_count++;
1564 if (ap_dev->queue_count < ap_dev->queue_depth &&
1565 ap_dev->requestq_count > 0)
1566 *flags |= 1;
1567 *flags |= 2;
1568 break;
1569 case AP_RESPONSE_RESET_IN_PROGRESS:
1570 __ap_schedule_poll_timer();
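		/* fall through */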
1571 case AP_RESPONSE_Q_FULL:
1572 *flags |= 2;
1573 break;
1574 case AP_RESPONSE_MESSAGE_TOO_BIG:
1575 case AP_RESPONSE_REQ_FAC_NOT_INST:
1576 return -EINVAL;
1577 default:
1578 return -ENODEV;
1579 }
1580 return 0;
1581 }
1582
1583 /**
1584 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
1585 * @ap_dev: pointer to the bus device
1586 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1587 * required, bit 2^1 is set if the poll timer needs to get armed
1588 *
1589 * Poll AP device for pending replies and send new messages. If either
1590 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
1591 * Returns 0.
1592 */
1593 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1594 {
1595 int rc;
1596
1597 rc = ap_poll_read(ap_dev, flags);
1598 if (rc)
1599 return rc;
1600 return ap_poll_write(ap_dev, flags);
1601 }
1602
1603 /**
1604 * __ap_queue_message(): Queue a message to a device.
1605 * @ap_dev: pointer to the AP device
1606 * @ap_msg: the message to be queued
1607 *
1608 * Queue a message to a device. Returns 0 if successful.
1609 */
1610 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1611 {
1612 struct ap_queue_status status;
1613
1614 if (list_empty(&ap_dev->requestq) &&
1615 ap_dev->queue_count < ap_dev->queue_depth) {
1616 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1617 ap_msg->message, ap_msg->length,
1618 ap_msg->special);
1619 switch (status.response_code) {
1620 case AP_RESPONSE_NORMAL:
1621 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1622 atomic_inc(&ap_poll_requests);
1623 ap_dev->pendingq_count++;
1624 ap_increase_queue_count(ap_dev);
1625 ap_dev->total_request_count++;
1626 break;
1627 case AP_RESPONSE_Q_FULL:
1628 case AP_RESPONSE_RESET_IN_PROGRESS:
1629 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1630 ap_dev->requestq_count++;
1631 ap_dev->total_request_count++;
1632 return -EBUSY;
1633 case AP_RESPONSE_REQ_FAC_NOT_INST:
1634 case AP_RESPONSE_MESSAGE_TOO_BIG:
1635 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1636 return -EINVAL;
1637 default: /* Device is gone. */
1638 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1639 return -ENODEV;
1640 }
1641 } else {
1642 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1643 ap_dev->requestq_count++;
1644 ap_dev->total_request_count++;
1645 return -EBUSY;
1646 }
1647 ap_schedule_poll_timer();
1648 return 0;
1649 }
1650
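/**
 * ap_queue_message(): Queue a request to an AP device.
 * @ap_dev: The AP device that is to process the message
 * @ap_msg: The message to be queued
 *
 * Polls the device to make room, queues the message via
 * __ap_queue_message() and unregisters the device if it
 * turns out to be gone.
 */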
1651 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1652 {
1653 unsigned long flags;
1654 int rc;
1655
1656 /* For asynchronous message handling a valid receive-callback
1657 * is required. */
1658 BUG_ON(!ap_msg->receive);
1659
1660 spin_lock_bh(&ap_dev->lock);
1661 if (!ap_dev->unregistered) {
1662 /* Make room on the queue by polling for finished requests. */
1663 rc = ap_poll_queue(ap_dev, &flags);
1664 if (!rc)
1665 rc = __ap_queue_message(ap_dev, ap_msg);
1666 if (!rc)
1667 wake_up(&ap_poll_wait);
1668 if (rc == -ENODEV)
1669 ap_dev->unregistered = 1;
1670 } else {
1671 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1672 rc = -ENODEV;
1673 }
1674 spin_unlock_bh(&ap_dev->lock);
1675 if (rc == -ENODEV)
1676 device_unregister(&ap_dev->device);
1677 }
1678 EXPORT_SYMBOL(ap_queue_message);
1679
1680 /**
1681 * ap_cancel_message(): Cancel a crypto request.
1682 * @ap_dev: The AP device that has the message queued
1683 * @ap_msg: The message that is to be removed
1684 *
1685 * Cancel a crypto request. This is done by removing the request
1686 * from the device pending or request queue. Note that the
1687 * request stays on the AP queue. When it finishes the message
1688 * reply will be discarded because the psmid can't be found.
1689 */
1690 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1691 {
1692 struct ap_message *tmp;
1693
1694 spin_lock_bh(&ap_dev->lock);
1695 if (!list_empty(&ap_msg->list)) {
1696 list_for_each_entry(tmp, &ap_dev->pendingq, list)
1697 if (tmp->psmid == ap_msg->psmid) {
1698 ap_dev->pendingq_count--;
1699 goto found;
1700 }
1701 ap_dev->requestq_count--;
1702 found:
1703 list_del_init(&ap_msg->list);
1704 }
1705 spin_unlock_bh(&ap_dev->lock);
1706 }
1707 EXPORT_SYMBOL(ap_cancel_message);
1708
1709 /**
1710 * ap_poll_timeout(): AP receive polling for finished AP requests.
1711 * @unused: Unused pointer.
1712 *
1713 * Schedules the AP tasklet using a high resolution timer.
1714 */
1715 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1716 {
1717 tasklet_schedule(&ap_tasklet);
1718 return HRTIMER_NORESTART;
1719 }
1720
1721 /**
1722 * ap_reset(): Reset a not responding AP device.
1723 * @ap_dev: Pointer to the AP device
1724 *
1725 * Reset a not responding AP device and move all requests from the
1726 * pending queue to the request queue.
1727 */
1728 static void ap_reset(struct ap_device *ap_dev)
1729 {
1730 int rc;
1731
1732 ap_dev->reset = AP_RESET_IGNORE;
1733 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1734 ap_dev->queue_count = 0;
1735 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1736 ap_dev->requestq_count += ap_dev->pendingq_count;
1737 ap_dev->pendingq_count = 0;
1738 rc = ap_init_queue(ap_dev->qid);
1739 if (rc == -ENODEV)
1740 ap_dev->unregistered = 1;
1741 else
1742 __ap_schedule_poll_timer();
1743 }
1744
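/**
 * __ap_poll_device(): Poll a single AP device.
 * @ap_dev: Pointer to the AP device
 * @flags: Pointer to the poll control flags
 *
 * Polls the device and performs a pending reset if one has been
 * requested. Must be called with the device lock held.
 */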
1745 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1746 {
1747 if (!ap_dev->unregistered) {
1748 if (ap_poll_queue(ap_dev, flags))
1749 ap_dev->unregistered = 1;
1750 if (ap_dev->reset == AP_RESET_DO)
1751 ap_reset(ap_dev);
1752 }
1753 return 0;
1754 }
1755
1756 /**
1757 * ap_poll_all(): Poll all AP devices.
1758 * @dummy: Unused variable
1759 *
1760 * Poll all AP devices on the bus in a round robin fashion. Continue
1761 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1762 * of the control flags has been set arm the poll timer.
1763 */
1764 static void ap_poll_all(unsigned long dummy)
1765 {
1766 unsigned long flags;
1767 struct ap_device *ap_dev;
1768
1769 /* Reset the indicator if interrupts are used. Thus new interrupts can
1770 	 * be received. Doing this at the beginning of the tasklet is important
1771 	 * so that no requests on any AP get lost.
1772 */
1773 if (ap_using_interrupts())
1774 xchg(ap_airq.lsi_ptr, 0);
1775 do {
1776 flags = 0;
1777 spin_lock(&ap_device_list_lock);
1778 list_for_each_entry(ap_dev, &ap_device_list, list) {
1779 spin_lock(&ap_dev->lock);
1780 __ap_poll_device(ap_dev, &flags);
1781 spin_unlock(&ap_dev->lock);
1782 }
1783 spin_unlock(&ap_device_list_lock);
1784 } while (flags & 1);
1785 if (flags & 2)
1786 ap_schedule_poll_timer();
1787 }
1788
1789 /**
1790 * ap_poll_thread(): Thread that polls for finished requests.
1791 * @data: Unused pointer
1792 *
1793 * AP bus poll thread. The purpose of this thread is to poll for
1794 * finished requests in a loop if there is a "free" cpu - that is
1795 * a cpu that doesn't have anything better to do. The polling stops
1796 * as soon as there is another task or if all messages have been
1797 * delivered.
1798 */
1799 static int ap_poll_thread(void *data)
1800 {
1801 DECLARE_WAITQUEUE(wait, current);
1802 unsigned long flags;
1803 int requests;
1804 struct ap_device *ap_dev;
1805
1806 set_user_nice(current, 19);
1807 while (1) {
1808 if (ap_suspend_flag)
1809 return 0;
1810 if (need_resched()) {
1811 schedule();
1812 continue;
1813 }
1814 add_wait_queue(&ap_poll_wait, &wait);
1815 set_current_state(TASK_INTERRUPTIBLE);
1816 if (kthread_should_stop())
1817 break;
1818 requests = atomic_read(&ap_poll_requests);
1819 if (requests <= 0)
1820 schedule();
1821 set_current_state(TASK_RUNNING);
1822 remove_wait_queue(&ap_poll_wait, &wait);
1823
1824 flags = 0;
1825 spin_lock_bh(&ap_device_list_lock);
1826 list_for_each_entry(ap_dev, &ap_device_list, list) {
1827 spin_lock(&ap_dev->lock);
1828 __ap_poll_device(ap_dev, &flags);
1829 spin_unlock(&ap_dev->lock);
1830 }
1831 spin_unlock_bh(&ap_device_list_lock);
1832 }
1833 set_current_state(TASK_RUNNING);
1834 remove_wait_queue(&ap_poll_wait, &wait);
1835 return 0;
1836 }
1837
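/**
 * ap_poll_thread_start(): Start the AP poll kernel thread.
 *
 * Does nothing if adapter interrupts are in use, the bus is
 * suspended or the thread is already running.
 */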
1838 static int ap_poll_thread_start(void)
1839 {
1840 int rc;
1841
1842 if (ap_using_interrupts() || ap_suspend_flag)
1843 return 0;
1844 mutex_lock(&ap_poll_thread_mutex);
1845 if (!ap_poll_kthread) {
1846 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1847 rc = PTR_RET(ap_poll_kthread);
1848 if (rc)
1849 ap_poll_kthread = NULL;
1850 }
1851 else
1852 rc = 0;
1853 mutex_unlock(&ap_poll_thread_mutex);
1854 return rc;
1855 }
1856
1857 static void ap_poll_thread_stop(void)
1858 {
1859 mutex_lock(&ap_poll_thread_mutex);
1860 if (ap_poll_kthread) {
1861 kthread_stop(ap_poll_kthread);
1862 ap_poll_kthread = NULL;
1863 }
1864 mutex_unlock(&ap_poll_thread_mutex);
1865 }
1866
1867 /**
1868 * ap_request_timeout(): Handling of request timeouts
1869 * @data: Holds the AP device.
1870 *
1871 * Handles request timeouts.
1872 */
1873 static void ap_request_timeout(unsigned long data)
1874 {
1875 struct ap_device *ap_dev = (struct ap_device *) data;
1876
1877 if (ap_dev->reset == AP_RESET_ARMED) {
1878 ap_dev->reset = AP_RESET_DO;
1879
1880 if (ap_using_interrupts())
1881 tasklet_schedule(&ap_tasklet);
1882 }
1883 }
1884
1885 static void ap_reset_domain(void)
1886 {
1887 int i;
1888
1889 if (ap_domain_index != -1)
1890 for (i = 0; i < AP_DEVICES; i++)
1891 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1892 }
1893
1894 static void ap_reset_all(void)
1895 {
1896 int i, j;
1897
1898 for (i = 0; i < AP_DOMAINS; i++)
1899 for (j = 0; j < AP_DEVICES; j++)
1900 ap_reset_queue(AP_MKQID(j, i));
1901 }
1902
1903 static struct reset_call ap_reset_call = {
1904 .fn = ap_reset_all,
1905 };
1906
1907 /**
1908 * ap_module_init(): The module initialization code.
1909 *
1910 * Initializes the module.
1911 */
1912 int __init ap_module_init(void)
1913 {
1914 int rc, i;
1915
1916 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1917 pr_warning("%d is not a valid cryptographic domain\n",
1918 ap_domain_index);
1919 return -EINVAL;
1920 }
1921 	/* In the resume callback we need to know if the user had set the domain.
1922 	 * If so, we cannot just reset it.
1923 */
1924 if (ap_domain_index >= 0)
1925 user_set_domain = 1;
1926
1927 if (ap_instructions_available() != 0) {
1928 pr_warning("The hardware system does not support "
1929 "AP instructions\n");
1930 return -ENODEV;
1931 }
1932 if (ap_interrupts_available()) {
1933 rc = register_adapter_interrupt(&ap_airq);
1934 ap_airq_flag = (rc == 0);
1935 }
1936
1937 register_reset_call(&ap_reset_call);
1938
1939 /* Create /sys/bus/ap. */
1940 rc = bus_register(&ap_bus_type);
1941 if (rc)
1942 goto out;
1943 for (i = 0; ap_bus_attrs[i]; i++) {
1944 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1945 if (rc)
1946 goto out_bus;
1947 }
1948
1949 /* Create /sys/devices/ap. */
1950 ap_root_device = root_device_register("ap");
1951 rc = PTR_RET(ap_root_device);
1952 if (rc)
1953 goto out_bus;
1954
1955 ap_work_queue = create_singlethread_workqueue("kapwork");
1956 if (!ap_work_queue) {
1957 rc = -ENOMEM;
1958 goto out_root;
1959 }
1960
1961 ap_query_configuration();
1962 if (ap_select_domain() == 0)
1963 ap_scan_bus(NULL);
1964
1965 /* Setup the AP bus rescan timer. */
1966 init_timer(&ap_config_timer);
1967 ap_config_timer.function = ap_config_timeout;
1968 ap_config_timer.data = 0;
1969 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1970 add_timer(&ap_config_timer);
1971
1972 	/* Set up the high resolution poll timer.
1973 	 * If we are running under z/VM, adjust polling to the z/VM polling rate.
1974 */
1975 if (MACHINE_IS_VM)
1976 poll_timeout = 1500000;
1977 spin_lock_init(&ap_poll_timer_lock);
1978 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1979 ap_poll_timer.function = ap_poll_timeout;
1980
1981 /* Start the low priority AP bus poll thread. */
1982 if (ap_thread_flag) {
1983 rc = ap_poll_thread_start();
1984 if (rc)
1985 goto out_work;
1986 }
1987
1988 return 0;
1989
1990 out_work:
1991 del_timer_sync(&ap_config_timer);
1992 hrtimer_cancel(&ap_poll_timer);
1993 destroy_workqueue(ap_work_queue);
1994 out_root:
1995 root_device_unregister(ap_root_device);
1996 out_bus:
1997 while (i--)
1998 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1999 bus_unregister(&ap_bus_type);
2000 out:
2001 unregister_reset_call(&ap_reset_call);
2002 if (ap_using_interrupts())
2003 unregister_adapter_interrupt(&ap_airq);
2004 return rc;
2005 }
2006
2007 static int __ap_match_all(struct device *dev, void *data)
2008 {
2009 return 1;
2010 }
2011
2012 /**
2013  * ap_module_exit(): The module termination code
2014 *
2015 * Terminates the module.
2016 */
2017 void ap_module_exit(void)
2018 {
2019 int i;
2020 struct device *dev;
2021
2022 ap_reset_domain();
2023 ap_poll_thread_stop();
2024 del_timer_sync(&ap_config_timer);
2025 hrtimer_cancel(&ap_poll_timer);
2026 destroy_workqueue(ap_work_queue);
2027 tasklet_kill(&ap_tasklet);
2028 root_device_unregister(ap_root_device);
2029 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
2030 __ap_match_all)))
2031 {
2032 device_unregister(dev);
2033 put_device(dev);
2034 }
2035 for (i = 0; ap_bus_attrs[i]; i++)
2036 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
2037 bus_unregister(&ap_bus_type);
2038 unregister_reset_call(&ap_reset_call);
2039 if (ap_using_interrupts())
2040 unregister_adapter_interrupt(&ap_airq);
2041 }
2042
2043 module_init(ap_module_init);
2044 module_exit(ap_module_exit);