tpm_tis: Re-enable interrupts upon (S3) resume
drivers/char/tpm/tpm_tis.c [pandora-kernel.git]
/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include "tpm.h"

#define TPM_HEADER_SIZE 10

enum tis_access {
        TPM_ACCESS_VALID = 0x80,
        TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
        TPM_ACCESS_REQUEST_PENDING = 0x04,
        TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
        TPM_STS_VALID = 0x80,
        TPM_STS_COMMAND_READY = 0x40,
        TPM_STS_GO = 0x20,
        TPM_STS_DATA_AVAIL = 0x10,
        TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
        TPM_GLOBAL_INT_ENABLE = 0x80000000,
        TPM_INTF_BURST_COUNT_STATIC = 0x100,
        TPM_INTF_CMD_READY_INT = 0x080,
        TPM_INTF_INT_EDGE_FALLING = 0x040,
        TPM_INTF_INT_EDGE_RISING = 0x020,
        TPM_INTF_INT_LEVEL_LOW = 0x010,
        TPM_INTF_INT_LEVEL_HIGH = 0x008,
        TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
        TPM_INTF_STS_VALID_INT = 0x002,
        TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
        TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
};

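/*
 * Register offsets.  Each locality has its own 4 KiB register page, which is
 * where the (l << 12) term in the offsets below comes from.
 */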
#define TPM_ACCESS(l)                   (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)               (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)               (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)               (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)                (0x0014 | ((l) << 12))
#define TPM_STS(l)                      (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)                (0x0024 | ((l) << 12))

#define TPM_DID_VID(l)                  (0x0F00 | ((l) << 12))
#define TPM_RID(l)                      (0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);

#ifdef CONFIG_ACPI
static int is_itpm(struct pnp_dev *dev)
{
        struct acpi_device *acpi = pnp_acpi_device(dev);
        struct acpi_hardware_id *id;

        list_for_each_entry(id, &acpi->pnp.ids, list) {
                if (!strcmp("INTC0102", id->id))
                        return 1;
        }

        return 0;
}
#else
static int is_itpm(struct pnp_dev *dev)
{
        return 0;
}
#endif

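/*
 * Returns the locality number (and caches it in chip->vendor.locality) if
 * locality l is flagged both valid and active, -1 otherwise.
 */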
static int check_locality(struct tpm_chip *chip, int l)
{
        if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
             (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
                return chip->vendor.locality = l;

        return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
        if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
                iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
                         chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
        unsigned long stop;
        long rc;

        if (check_locality(chip, l) >= 0)
                return l;

        iowrite8(TPM_ACCESS_REQUEST_USE,
                 chip->vendor.iobase + TPM_ACCESS(l));

        if (chip->vendor.irq) {
                rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
                                                      (check_locality
                                                       (chip, l) >= 0),
                                                      chip->vendor.timeout_a);
                if (rc > 0)
                        return l;

        } else {
                /* wait for burstcount */
                stop = jiffies + chip->vendor.timeout_a;
                do {
                        if (check_locality(chip, l) >= 0)
                                return l;
                        msleep(TPM_TIMEOUT);
                }
                while (time_before(jiffies, stop));
        }
        return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
        return ioread8(chip->vendor.iobase +
                       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
        /* this causes the current command to be aborted */
        iowrite8(TPM_STS_COMMAND_READY,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

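/*
 * The burst count occupies bits 8..23 of TPM_STS; it tells us how many bytes
 * can be read from (or written to) the FIFO before the status needs to be
 * checked again.
 */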
static int get_burstcount(struct tpm_chip *chip)
{
        unsigned long stop;
        int burstcnt;

        /* wait for burstcount */
        /* which timeout value, spec has 2 answers (c & d) */
        stop = jiffies + chip->vendor.timeout_d;
        do {
                burstcnt = ioread8(chip->vendor.iobase +
                                   TPM_STS(chip->vendor.locality) + 1);
                burstcnt += ioread8(chip->vendor.iobase +
                                    TPM_STS(chip->vendor.locality) +
                                    2) << 8;
                if (burstcnt)
                        return burstcnt;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
}

static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
                         wait_queue_head_t *queue)
{
        unsigned long stop;
        long rc;
        u8 status;

        /* check current status */
        status = tpm_tis_status(chip);
        if ((status & mask) == mask)
                return 0;

        if (chip->vendor.irq) {
                rc = wait_event_interruptible_timeout(*queue,
                                                      ((tpm_tis_status
                                                        (chip) & mask) ==
                                                       mask), timeout);
                if (rc > 0)
                        return 0;
        } else {
                stop = jiffies + timeout;
                do {
                        msleep(TPM_TIMEOUT);
                        status = tpm_tis_status(chip);
                        if ((status & mask) == mask)
                                return 0;
                } while (time_before(jiffies, stop));
        }
        return -ETIME;
}

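/*
 * Drain up to 'count' bytes from the data FIFO, one burst at a time, waiting
 * for TPM_STS_DATA_AVAIL | TPM_STS_VALID before each burst.
 */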
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;
        while (size < count &&
               wait_for_stat(chip,
                             TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                             chip->vendor.timeout_c,
                             &chip->vendor.read_queue)
               == 0) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.
                                                            locality));
        }
        return size;
}

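/*
 * A TPM 1.2 response carries a 10 byte header: 2 byte tag, 4 byte total
 * length (big endian), 4 byte return code.  Read the header first, then use
 * its length field to fetch the rest of the result.
 */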
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;

        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }

        /* read first 10 bytes, including tag, paramsize, and result */
        if ((size =
             recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
                dev_err(chip->dev, "Unable to read header\n");
                goto out;
        }

        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                size = -EIO;
                goto out;
        }

        if ((size +=
             recv_data(chip, &buf[TPM_HEADER_SIZE],
                       expected - TPM_HEADER_SIZE)) < expected) {
                dev_err(chip->dev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }

        wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                      &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
                dev_err(chip->dev, "Error left over data\n");
                size = -EIO;
                goto out;
        }

out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}

static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;
        u32 ordinal;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;

        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_stat
                    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
                     &chip->vendor.int_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }

        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }

                wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                              &chip->vendor.int_queue);
                status = tpm_tis_status(chip);
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }

        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase +
                 TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                      &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }

        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

        if (chip->vendor.irq) {
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (wait_for_stat
                    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                     tpm_calc_ordinal_duration(chip, ordinal),
                     &chip->vendor.read_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

static const struct file_operations tis_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *tis_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
        .attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = TPM_STS_COMMAND_READY,
        .attr_group = &tis_attr_grp,
        .miscdev = {
                    .fops = &tis_ops,},
};

static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        chip->vendor.irq = irq;

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

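/*
 * Interrupt handler used during normal operation: wake up whoever is waiting
 * on read_queue/int_queue, then acknowledge the sources by writing them back
 * to TPM_INT_STATUS and reading the register to flush the write.
 */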
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static int tpm_tis_init(struct device *dev, resource_size_t start,
                        resource_size_t len, unsigned int irq)
{
        u32 vendor, intfcaps, intmask;
        int rc, i;
        struct tpm_chip *chip;

        if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
                return -ENODEV;

        chip->vendor.iobase = ioremap(start, len);
        if (!chip->vendor.iobase) {
                rc = -EIO;
                goto out_err;
        }

        /* Default timeouts */
        chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
        chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

        dev_info(dev,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");

        /* Figure out the capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
                     TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");

        /* INTERRUPT Setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;

        iowrite32(intmask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = irq;
        if (interrupts && !chip->vendor.irq) {
                chip->vendor.irq =
                    ioread8(chip->vendor.iobase +
                            TPM_INT_VECTOR(chip->vendor.locality));

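                /*
                 * No vector was programmed by the firmware, so probe IRQs
                 * 3-15: arm each one, generate a test interrupt and let
                 * tis_int_probe() latch the first vector that fires.
                 */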
                for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                    TPM_INT_VECTOR(chip->vendor.locality));
                        if (request_irq
                            (i, tis_int_probe, IRQF_SHARED,
                             chip->vendor.miscdev.name, chip) != 0) {
                                dev_info(chip->dev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }

                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));

                        /* Generate Interrupts */
                        tpm_gen_interrupt(chip);

                        /* Turn off */
                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        free_irq(i, chip);
                }
        }
        if (chip->vendor.irq) {
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (request_irq
                    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
                     chip->vendor.miscdev.name, chip) != 0) {
                        dev_info(chip->dev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }

        INIT_LIST_HEAD(&chip->vendor.list);
        spin_lock(&tis_lock);
        list_add(&chip->vendor.list, &tis_chips);
        spin_unlock(&tis_lock);

        tpm_get_timeouts(chip);
        tpm_continue_selftest(chip);

        return 0;
out_err:
        if (chip->vendor.iobase)
                iounmap(chip->vendor.iobase);
        tpm_remove_hardware(chip->dev);
        return rc;
}

/*
 * Used by both the PNP and the platform resume paths, so it must live
 * outside the CONFIG_PNP block below.
 */
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;

        /* reenable interrupts that device may have lost or
           BIOS/firmware may have disabled */
        iowrite8(chip->vendor.irq, chip->vendor.iobase +
                 TPM_INT_VECTOR(chip->vendor.locality));

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                                      const struct pnp_device_id *pnp_id)
{
        resource_size_t start, len;
        unsigned int irq = 0;

        start = pnp_mem_start(pnp_dev, 0);
        len = pnp_mem_len(pnp_dev, 0);

        if (pnp_irq_valid(pnp_dev, 0))
                irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = 0;

        if (is_itpm(pnp_dev))
                itpm = 1;

        return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

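/*
 * The TPM's interrupt configuration may have been lost across S3, so re-arm
 * it before the core resumes the chip; a self-test is kicked off afterwards.
 */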
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);
        int ret;

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        ret = tpm_pm_resume(&dev->dev);
        if (!ret)
                tpm_continue_selftest(chip);

        return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);

        tpm_dev_vendor_release(chip);

        kfree(chip);
}


static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .suspend = tpm_tis_pnp_suspend,
        .resume = tpm_tis_pnp_resume,
        .remove = tpm_tis_pnp_remove,
};

#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
        .driver = {
                .name = "tpm_tis",
                .owner          = THIS_MODULE,
        },
        .suspend = tpm_tis_suspend,
        .resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
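/*
 * With CONFIG_PNP the driver normally binds through PnP/ACPI enumeration;
 * force=1 bypasses that and registers a bare platform device at the fixed
 * TIS window (TIS_MEM_BASE/TIS_MEM_LEN).
 */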
static int __init init_tis(void)
{
        int rc;
#ifdef CONFIG_PNP
        if (!force)
                return pnp_register_driver(&tis_pnp_driver);
#endif

        rc = platform_driver_register(&tis_drv);
        if (rc < 0)
                return rc;
        pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
        if (IS_ERR(pdev)) {
                platform_driver_unregister(&tis_drv);
                return PTR_ERR(pdev);
        }
        rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
        if (rc != 0) {
                platform_device_unregister(pdev);
                platform_driver_unregister(&tis_drv);
        }
        return rc;
}

static void __exit cleanup_tis(void)
{
        struct tpm_vendor_specific *i, *j;
        struct tpm_chip *chip;
        spin_lock(&tis_lock);
        list_for_each_entry_safe(i, j, &tis_chips, list) {
                chip = to_tpm_chip(i);
                tpm_remove_hardware(chip->dev);
                iowrite32(~TPM_GLOBAL_INT_ENABLE &
                          ioread32(chip->vendor.iobase +
                                   TPM_INT_ENABLE(chip->vendor.
                                                  locality)),
                          chip->vendor.iobase +
                          TPM_INT_ENABLE(chip->vendor.locality));
                release_locality(chip, chip->vendor.locality, 1);
                if (chip->vendor.irq)
                        free_irq(chip->vendor.irq, chip);
                iounmap(i->iobase);
                list_del(&i->list);
        }
        spin_unlock(&tis_lock);
#ifdef CONFIG_PNP
        if (!force) {
                pnp_unregister_driver(&tis_pnp_driver);
                return;
        }
#endif
        platform_device_unregister(pdev);
        platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");