drivers/char/tpm/tpm_tis.c
/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
        TPM_ACCESS_VALID = 0x80,
        TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
        TPM_ACCESS_REQUEST_PENDING = 0x04,
        TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
        TPM_STS_VALID = 0x80,
        TPM_STS_COMMAND_READY = 0x40,
        TPM_STS_GO = 0x20,
        TPM_STS_DATA_AVAIL = 0x10,
        TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
        TPM_GLOBAL_INT_ENABLE = 0x80000000,
        TPM_INTF_BURST_COUNT_STATIC = 0x100,
        TPM_INTF_CMD_READY_INT = 0x080,
        TPM_INTF_INT_EDGE_FALLING = 0x040,
        TPM_INTF_INT_EDGE_RISING = 0x020,
        TPM_INTF_INT_LEVEL_LOW = 0x010,
        TPM_INTF_INT_LEVEL_HIGH = 0x008,
        TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
        TPM_INTF_STS_VALID_INT = 0x002,
        TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
        TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
};

#define TPM_ACCESS(l)           (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)       (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)       (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)       (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)        (0x0014 | ((l) << 12))
#define TPM_STS(l)              (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)        (0x0024 | ((l) << 12))

#define TPM_DID_VID(l)          (0x0F00 | ((l) << 12))
#define TPM_RID(l)              (0x0F04 | ((l) << 12))

struct priv_data {
        bool irq_tested;
};

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
        struct acpi_device *acpi = pnp_acpi_device(dev);
        struct acpi_hardware_id *id;

        if (!acpi)
                return 0;

        list_for_each_entry(id, &acpi->pnp.ids, list) {
                if (!strcmp("INTC0102", id->id))
                        return 1;
        }

        return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
        return 0;
}
#endif

/* Before we attempt to access the TPM we must see that the valid bit is set.
 * The specification says that this bit is 0 at reset and remains 0 until the
 * 'TPM has gone through its self test and initialization and has established
 * correct values in the other bits.' */
static int wait_startup(struct tpm_chip *chip, int l)
{
        unsigned long stop = jiffies + chip->vendor.timeout_a;
        do {
                if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                    TPM_ACCESS_VALID)
                        return 0;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -1;
}

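/* Report whether locality l is currently active: return (and cache) l when
 * the access register is valid and the locality is active, -1 otherwise. */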
static int check_locality(struct tpm_chip *chip, int l)
{
        if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
             (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
                return chip->vendor.locality = l;

        return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
        if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
                     (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
                iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
                         chip->vendor.iobase + TPM_ACCESS(l));
}

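/* Request use of locality l and wait for the TPM to grant it, either by
 * sleeping on the interrupt queue or by polling, until timeout_a expires.
 * Returns l on success, -1 on timeout. */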
static int request_locality(struct tpm_chip *chip, int l)
{
        unsigned long stop, timeout;
        long rc;

        if (check_locality(chip, l) >= 0)
                return l;

        iowrite8(TPM_ACCESS_REQUEST_USE,
                 chip->vendor.iobase + TPM_ACCESS(l));

        stop = jiffies + chip->vendor.timeout_a;

        if (chip->vendor.irq) {
again:
                timeout = stop - jiffies;
                if ((long)timeout <= 0)
                        return -1;
                rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
                                                      (check_locality(chip, l) >= 0),
                                                      timeout);
                if (rc > 0)
                        return l;
                if (rc == -ERESTARTSYS && freezing(current)) {
                        clear_thread_flag(TIF_SIGPENDING);
                        goto again;
                }
        } else {
                /* wait for burstcount */
                do {
                        if (check_locality(chip, l) >= 0)
                                return l;
                        msleep(TPM_TIMEOUT);
                } while (time_before(jiffies, stop));
        }
        return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
        return ioread8(chip->vendor.iobase +
                       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
        /* this causes the current command to be aborted */
        iowrite8(TPM_STS_COMMAND_READY,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

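/* Read the 16-bit burstCount field of the status register, polling until it
 * is non-zero or timeout_d expires. Returns the burst count or -EBUSY. */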
static int get_burstcount(struct tpm_chip *chip)
{
        unsigned long stop;
        int burstcnt;

        /* wait for burstcount */
        /* which timeout value, spec has 2 answers (c & d) */
        stop = jiffies + chip->vendor.timeout_d;
        do {
                burstcnt = ioread8(chip->vendor.iobase +
                                   TPM_STS(chip->vendor.locality) + 1);
                burstcnt += ioread8(chip->vendor.iobase +
                                    TPM_STS(chip->vendor.locality) + 2) << 8;
                if (burstcnt)
                        return burstcnt;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
}

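/* Read up to count bytes from the data FIFO, one burst at a time, waiting
 * for dataAvail between bursts. Returns the number of bytes read. */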
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;
        while (size < count &&
               wait_for_tpm_stat(chip,
                                 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                                 chip->vendor.timeout_c,
                                 &chip->vendor.read_queue, true) == 0) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.locality));
        }
        return size;
}

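/* Receive a complete response: read the 10-byte header first, then the rest
 * of the body based on the paramsize field, and make sure the TPM reports no
 * leftover data before handing the locality back. */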
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;

        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }

        /* read first 10 bytes, including tag, paramsize, and result */
        if ((size =
             recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
                dev_err(chip->pdev, "Unable to read header\n");
                goto out;
        }

        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                size = -EIO;
                goto out;
        }

        if ((size +=
             recv_data(chip, &buf[TPM_HEADER_SIZE],
                       expected - TPM_HEADER_SIZE)) < expected) {
                dev_err(chip->pdev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }

        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue, false);
        status = tpm_tis_status(chip);
        if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
                dev_err(chip->pdev, "Error left over data\n");
                size = -EIO;
                goto out;
        }

out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;

        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_tpm_stat(chip, TPM_STS_COMMAND_READY,
                                      chip->vendor.timeout_b,
                                      &chip->vendor.int_queue, false) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }

        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }

                wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                                  &chip->vendor.int_queue, false);
                status = tpm_tis_status(chip);
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }

        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue, false);
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }

        return 0;

out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

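/* Mask the global interrupt enable bit, free the irq and fall back to
 * polled operation. */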
static void disable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;

        intmask = ioread32(chip->vendor.iobase +
                           TPM_INT_ENABLE(chip->vendor.locality));
        intmask &= ~TPM_GLOBAL_INT_ENABLE;
        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
        free_irq(chip->vendor.irq, chip);
        chip->vendor.irq = 0;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc;
        u32 ordinal;

        rc = tpm_tis_send_data(chip, buf, len);
        if (rc < 0)
                return rc;

        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

        if (chip->vendor.irq) {
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (wait_for_tpm_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                                      tpm_calc_ordinal_duration(chip, ordinal),
                                      &chip->vendor.read_queue, false) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

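/* Until an interrupt has actually been observed, run each command with the
 * irq temporarily cleared so the core polls for completion; if the handler
 * still has not set irq_tested afterwards, report the firmware bug and fall
 * back to polling permanently. */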
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, irq;
        struct priv_data *priv = chip->vendor.priv;

        if (!chip->vendor.irq || priv->irq_tested)
                return tpm_tis_send_main(chip, buf, len);

        /* Verify receipt of the expected IRQ */
        irq = chip->vendor.irq;
        chip->vendor.irq = 0;
        rc = tpm_tis_send_main(chip, buf, len);
        chip->vendor.irq = irq;
        if (!priv->irq_tested)
                msleep(1);
        if (!priv->irq_tested) {
                disable_interrupts(chip);
                dev_err(chip->pdev,
                        FW_BUG "TPM interrupt not working, polling instead\n");
        }
        priv->irq_tested = true;
        return rc;
}

struct tis_vendor_timeout_override {
        u32 did_vid;
        unsigned long timeout_us[4];
};

static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
        /* Atmel 3204 */
        { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
                        (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
};

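/* Replace the chip-reported timeouts with known-good per-vendor values,
 * keyed on the DID/VID register. Returns true if an override was applied. */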
static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
                                    unsigned long *timeout_cap)
{
        int i;
        u32 did_vid;

        did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

        for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
                if (vendor_timeout_overrides[i].did_vid != did_vid)
                        continue;
                memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
                       sizeof(vendor_timeout_overrides[i].timeout_us));
                return true;
        }

        return false;
}

/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
        int rc = 0;
        u8 cmd_getticks[] = {
                0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
                0x00, 0x00, 0x00, 0xf1
        };
        size_t len = sizeof(cmd_getticks);
        bool rem_itpm = itpm;
        u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

        /* probe only iTPMs */
        if (vendor != TPM_VID_INTEL)
                return 0;

        itpm = false;

        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0)
                goto out;

        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);

        itpm = true;

        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0) {
                dev_info(chip->pdev, "Detected an iTPM.\n");
                rc = 1;
        } else
                rc = -EFAULT;

out:
        itpm = rem_itpm;
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);

        return rc;
}

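/* Different vendors report a cancelled command with different status values,
 * so decide per manufacturer what "cancelled" looks like. */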
static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
        switch (chip->vendor.manufacturer_id) {
        case TPM_VID_WINBOND:
                return ((status == TPM_STS_VALID) ||
                        (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
        case TPM_VID_STM:
                return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
        default:
                return (status == TPM_STS_COMMAND_READY);
        }
}

static const struct tpm_class_ops tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .update_timeouts = tpm_tis_update_timeouts,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = tpm_tis_req_canceled,
};

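/* Minimal handler installed only while auto-probing the interrupt line:
 * record which irq fired and acknowledge it. */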
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        chip->vendor.probed_irq = irq;

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

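/* Main interrupt handler: wake up whoever is waiting for data, status or a
 * locality change, then acknowledge the interrupt in TPM_INT_STATUS. */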
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

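/* Common teardown: mask the global interrupt enable bit and force-release
 * the locality we are holding. */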
static void tpm_tis_remove(struct tpm_chip *chip)
{
        iowrite32(~TPM_GLOBAL_INT_ENABLE &
                  ioread32(chip->vendor.iobase +
                           TPM_INT_ENABLE(chip->vendor.locality)),
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        release_locality(chip, chip->vendor.locality, 1);
}

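/* Core probe path shared by the PNP and platform entry points: map the TIS
 * register window, claim locality 0, report the device, set up or
 * auto-detect an interrupt, and register the chip with the TPM core. */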
static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
                        resource_size_t start, resource_size_t len,
                        unsigned int irq)
{
        u32 vendor, intfcaps, intmask;
        int rc, i, irq_s, irq_e, probe;
        struct tpm_chip *chip;
        struct priv_data *priv;

        priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;

        chip = tpmm_chip_alloc(dev, &tpm_tis);
        if (IS_ERR(chip))
                return PTR_ERR(chip);

        chip->vendor.priv = priv;
        chip->acpi_dev_handle = acpi_dev_handle;

        chip->vendor.iobase = devm_ioremap(dev, start, len);
        if (!chip->vendor.iobase)
                return -EIO;

        /* Default timeouts */
        chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
        chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

        if (wait_startup(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
        chip->vendor.manufacturer_id = vendor;

        dev_info(dev, "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

        if (!itpm) {
                probe = probe_itpm(chip);
                if (probe < 0) {
                        rc = -ENODEV;
                        goto out_err;
                }
                itpm = !!probe;
        }

        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");

        /* Figure out the capabilities */
        intfcaps = ioread32(chip->vendor.iobase +
                            TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");

        /* INTERRUPT Setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);

        intmask = ioread32(chip->vendor.iobase +
                           TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;

        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = irq;
        if (interrupts && !chip->vendor.irq) {
                irq_s = ioread8(chip->vendor.iobase +
                                TPM_INT_VECTOR(chip->vendor.locality));
                if (irq_s) {
                        irq_e = irq_s;
                } else {
                        irq_s = 3;
                        irq_e = 15;
                }

                for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                 TPM_INT_VECTOR(chip->vendor.locality));
                        if (devm_request_irq(dev, i, tis_int_probe,
                                             IRQF_SHARED,
                                             chip->devname, chip) != 0) {
                                dev_info(chip->pdev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }

                        /* Clear all existing */
                        iowrite32(ioread32(chip->vendor.iobase +
                                           TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));

                        chip->vendor.probed_irq = 0;

                        /* Generate Interrupts */
                        tpm_gen_interrupt(chip);

                        chip->vendor.irq = chip->vendor.probed_irq;

                        /* free_irq will call into tis_int_probe;
                           clear all irqs we haven't seen while doing
                           tpm_gen_interrupt */
                        iowrite32(ioread32(chip->vendor.iobase +
                                           TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn off */
                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }
        if (chip->vendor.irq) {
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (devm_request_irq(dev, chip->vendor.irq, tis_int_handler,
                                     IRQF_SHARED,
                                     chip->devname, chip) != 0) {
                        dev_info(chip->pdev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* Clear all existing */
                        iowrite32(ioread32(chip->vendor.iobase +
                                           TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }

        if (tpm_get_timeouts(chip)) {
                dev_err(dev, "Could not get TPM timeouts and durations\n");
                rc = -ENODEV;
                goto out_err;
        }

        if (tpm_do_selftest(chip)) {
                dev_err(dev, "TPM self test failed\n");
                rc = -ENODEV;
                goto out_err;
        }

        return tpm_chip_register(chip);
out_err:
        tpm_tis_remove(chip);
        return rc;
}

#ifdef CONFIG_PM_SLEEP
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;

        /* reenable interrupts that device may have lost or
           BIOS/firmware may have disabled */
        iowrite8(chip->vendor.irq, chip->vendor.iobase +
                 TPM_INT_VECTOR(chip->vendor.locality));

        intmask = ioread32(chip->vendor.iobase +
                           TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

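/* On resume, re-arm the interrupt configuration (if one is in use) that
 * firmware may have clobbered across suspend, then run the core resume path
 * and repeat the self test. */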
static int tpm_tis_resume(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        int ret;

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        ret = tpm_pm_resume(dev);
        if (!ret)
                tpm_do_selftest(chip);

        return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);

#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                            const struct pnp_device_id *pnp_id)
{
        resource_size_t start, len;
        unsigned int irq = 0;
        acpi_handle acpi_dev_handle = NULL;

        start = pnp_mem_start(pnp_dev, 0);
        len = pnp_mem_len(pnp_dev, 0);

        if (pnp_irq_valid(pnp_dev, 0))
                irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = false;

        if (is_itpm(pnp_dev))
                itpm = true;

        if (pnp_acpi_device(pnp_dev))
                acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;

        return tpm_tis_init(&pnp_dev->dev, acpi_dev_handle, start, len, irq);
}

static struct pnp_device_id tpm_pnp_tbl[] = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);
        tpm_chip_unregister(chip);
        tpm_tis_remove(chip);
}

static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .remove = tpm_tis_pnp_remove,
        .driver = {
                .pm = &tpm_tis_pm,
        },
};

#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif

static struct platform_driver tis_drv = {
        .driver = {
                .name = "tpm_tis",
                .pm = &tpm_tis_pm,
        },
};

static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
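
/* With CONFIG_PNP the device is normally discovered via the PNP/ACPI tables;
 * the "force" parameter instead registers a platform device at the fixed
 * TIS_MEM_BASE window. */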
static int __init init_tis(void)
{
        int rc;
#ifdef CONFIG_PNP
        if (!force)
                return pnp_register_driver(&tis_pnp_driver);
#endif

        rc = platform_driver_register(&tis_drv);
        if (rc < 0)
                return rc;
        pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
        if (IS_ERR(pdev)) {
                rc = PTR_ERR(pdev);
                goto err_dev;
        }
        rc = tpm_tis_init(&pdev->dev, NULL, TIS_MEM_BASE, TIS_MEM_LEN, 0);
        if (rc)
                goto err_init;
        return 0;
err_init:
        platform_device_unregister(pdev);
err_dev:
        platform_driver_unregister(&tis_drv);
        return rc;
}

static void __exit cleanup_tis(void)
{
        struct tpm_chip *chip;
#ifdef CONFIG_PNP
        if (!force) {
                pnp_unregister_driver(&tis_pnp_driver);
                return;
        }
#endif
        chip = dev_get_drvdata(&pdev->dev);
        tpm_chip_unregister(chip);
        tpm_tis_remove(chip);
        platform_device_unregister(pdev);
        platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");