path: root/drivers/char
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig               |  11
-rw-r--r--  drivers/char/Makefile              |   2
-rw-r--r--  drivers/char/hw_random/n2-drv.c    |  29
-rw-r--r--  drivers/char/hw_random/n2rng.h     |   2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c  |   2
-rw-r--r--  drivers/char/msm_smd_pkt.c         |   5
-rw-r--r--  drivers/char/mspec.c               |   2
-rw-r--r--  drivers/char/ramoops.c             | 101
-rw-r--r--  drivers/char/random.c              | 349
-rw-r--r--  drivers/char/tile-srom.c           | 481
-rw-r--r--  drivers/char/tpm/tpm.c             | 102
-rw-r--r--  drivers/char/tpm/tpm.h             |   7
-rw-r--r--  drivers/char/tpm/tpm_nsc.c         |  14
-rw-r--r--  drivers/char/tpm/tpm_tis.c         | 179
14 files changed, 853 insertions(+), 433 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 49502bc5360a..423fd56bf612 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,5 +616,16 @@ config MSM_SMD_PKT
 	  Enables userspace clients to read and write to some packet SMD
 	  ports via device interface for MSM chipset.
 
+config TILE_SROM
+	bool "Character-device access via hypervisor to the Tilera SPI ROM"
+	depends on TILE
+	default y
+	---help---
+	  This device provides character-level read-write access
+	  to the SROM, typically via the "0", "1", and "2" devices
+	  in /dev/srom/. The Tilera hypervisor makes the flash
+	  device appear much like a simple EEPROM, and knows
+	  how to partition a single ROM for multiple purposes.
+
 endmenu
 
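The TILE_SROM help text above describes plain character-device access to the SPI ROM partitions under /dev/srom/. A minimal user-space sketch of that access model follows; the device path comes from the help text, and error handling is trimmed to the essentials.

```c
/* Hypothetical user-space reader for the first SROM partition.
 * Assumes the /dev/srom/0 node described in the help text exists. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];
	ssize_t n;
	int fd = open("/dev/srom/0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/srom/0");
		return EXIT_FAILURE;
	}
	n = read(fd, buf, sizeof(buf));	/* reads from the start of the partition */
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes from SROM partition 0\n", n);
	close(fd);
	return 0;
}
```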
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7a00672bd85d..32762ba769c2 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
 
 obj-$(CONFIG_JS_RTC)		+= js-rtc.o
 js-rtc-y = rtc.o
+
+obj-$(CONFIG_TILE_SROM)	+= tile-srom.o
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index ac6739e085e3..c3de70de00d4 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -1,6 +1,6 @@
 /* n2-drv.c: Niagara-2 RNG driver.
  *
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
  */
 
 #include <linux/kernel.h>
@@ -22,8 +22,8 @@
 
 #define DRV_MODULE_NAME		"n2rng"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"0.1"
-#define DRV_MODULE_RELDATE	"May 15, 2008"
+#define DRV_MODULE_VERSION	"0.2"
+#define DRV_MODULE_RELDATE	"July 27, 2011"
 
 static char version[] __devinitdata =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -623,14 +623,14 @@ static const struct of_device_id n2rng_match[];
 static int __devinit n2rng_probe(struct platform_device *op)
 {
 	const struct of_device_id *match;
-	int victoria_falls;
+	int multi_capable;
 	int err = -ENOMEM;
 	struct n2rng *np;
 
 	match = of_match_device(n2rng_match, &op->dev);
 	if (!match)
 		return -EINVAL;
-	victoria_falls = (match->data != NULL);
+	multi_capable = (match->data != NULL);
 
 	n2rng_driver_version();
 	np = kzalloc(sizeof(*np), GFP_KERNEL);
@@ -640,8 +640,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
 
 	INIT_DELAYED_WORK(&np->work, n2rng_work);
 
-	if (victoria_falls)
-		np->flags |= N2RNG_FLAG_VF;
+	if (multi_capable)
+		np->flags |= N2RNG_FLAG_MULTI;
 
 	err = -ENODEV;
 	np->hvapi_major = 2;
@@ -658,10 +658,10 @@ static int __devinit n2rng_probe(struct platform_device *op)
 		}
 	}
 
-	if (np->flags & N2RNG_FLAG_VF) {
+	if (np->flags & N2RNG_FLAG_MULTI) {
 		if (np->hvapi_major < 2) {
-			dev_err(&op->dev, "VF RNG requires HVAPI major "
-				"version 2 or later, got %lu\n",
+			dev_err(&op->dev, "multi-unit-capable RNG requires "
+				"HVAPI major version 2 or later, got %lu\n",
 				np->hvapi_major);
 			goto out_hvapi_unregister;
 		}
@@ -688,8 +688,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
 		goto out_free_units;
 
 	dev_info(&op->dev, "Found %s RNG, units: %d\n",
-		 ((np->flags & N2RNG_FLAG_VF) ?
-		  "Victoria Falls" : "Niagara2"),
+		 ((np->flags & N2RNG_FLAG_MULTI) ?
+		  "multi-unit-capable" : "single-unit"),
		 np->num_units);
 
 	np->hwrng.name = "n2rng";
@@ -751,6 +751,11 @@ static const struct of_device_id n2rng_match[] = {
 		.compatible	= "SUNW,vf-rng",
 		.data		= (void *) 1,
 	},
+	{
+		.name		= "random-number-generator",
+		.compatible	= "SUNW,kt-rng",
+		.data		= (void *) 1,
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 4bea07f30978..f244ac89087f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -68,7 +68,7 @@ struct n2rng {
 	struct platform_device	*op;
 
 	unsigned long		flags;
-#define N2RNG_FLAG_VF		0x00000001 /* Victoria Falls RNG, else N2 */
+#define N2RNG_FLAG_MULTI	0x00000001 /* Multi-unit capable RNG */
 #define N2RNG_FLAG_CONTROL	0x00000002 /* Operating in control domain */
 #define N2RNG_FLAG_READY	0x00000008 /* Ready for hw-rng layer */
 #define N2RNG_FLAG_SHUTDOWN	0x00000010 /* Driver unregistering */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 320668f4c3aa..3302586655c4 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -52,7 +52,7 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_X86
 /*
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index b6f8a65c9960..8eca55deb3a3 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void)
 	for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
 		smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
 					  GFP_KERNEL);
-		if (IS_ERR(smd_pkt_devp[i])) {
-			r = PTR_ERR(smd_pkt_devp[i]);
-			pr_err("kmalloc() failed %d\n", r);
+		if (!smd_pkt_devp[i]) {
+			pr_err("kmalloc() failed\n");
 			goto clean_cdevs;
 		}
 
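The hunk above fixes an error-handling mix-up: kzalloc() reports failure by returning NULL, not an ERR_PTR()-encoded value, so the IS_ERR()/PTR_ERR() pair could never trigger. A small illustrative sketch (not taken from this driver) contrasting the two kernel failure conventions:

```c
/* Illustrative only: the two failure conventions the fix distinguishes. */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>

static int example_init(void)
{
	void *buf;
	struct class *cls;

	buf = kzalloc(128, GFP_KERNEL);
	if (!buf)			/* plain allocators report failure with NULL */
		return -ENOMEM;

	cls = class_create(THIS_MODULE, "example");
	if (IS_ERR(cls)) {		/* many lookup/create APIs return ERR_PTR(-errno) */
		kfree(buf);
		return PTR_ERR(cls);
	}

	class_destroy(cls);
	kfree(buf);
	return 0;
}
```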
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 25d139c9dbed..5c0d96a820fa 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -46,7 +46,7 @@
 #include <asm/page.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/tlbflush.h>
 #include <asm/uncached.h>
 #include <asm/sn/addrs.h>
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 1a9f5f6d6ac5..810aff9e750f 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -19,18 +19,26 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/kmsg_dump.h>
 #include <linux/time.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/ramoops.h>
 
 #define RAMOOPS_KERNMSG_HDR "===="
+#define MIN_MEM_SIZE 4096UL
 
-#define RECORD_SIZE 4096UL
+static ulong record_size = MIN_MEM_SIZE;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+		"size of each dump done on oops/panic");
 
 static ulong mem_address;
 module_param(mem_address, ulong, 0400);
@@ -52,10 +60,15 @@ static struct ramoops_context {
 	void *virt_addr;
 	phys_addr_t phys_addr;
 	unsigned long size;
+	unsigned long record_size;
+	int dump_oops;
 	int count;
 	int max_count;
 } oops_cxt;
 
+static struct platform_device *dummy;
+static struct ramoops_platform_data *dummy_data;
+
 static void ramoops_do_dump(struct kmsg_dumper *dumper,
 		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
 		const char *s2, unsigned long l2)
@@ -74,13 +87,13 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
 		return;
 
 	/* Only dump oopses if dump_oops is set */
-	if (reason == KMSG_DUMP_OOPS && !dump_oops)
+	if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
 		return;
 
-	buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
+	buf = cxt->virt_addr + (cxt->count * cxt->record_size);
 	buf_orig = buf;
 
-	memset(buf, '\0', RECORD_SIZE);
+	memset(buf, '\0', cxt->record_size);
 	res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
 	buf += res;
 	do_gettimeofday(&timestamp);
@@ -88,8 +101,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
 	buf += res;
 
 	hdr_size = buf - buf_orig;
-	l2_cpy = min(l2, RECORD_SIZE - hdr_size);
-	l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
+	l2_cpy = min(l2, cxt->record_size - hdr_size);
+	l1_cpy = min(l1, cxt->record_size - hdr_size - l2_cpy);
 
 	s2_start = l2 - l2_cpy;
 	s1_start = l1 - l1_cpy;
@@ -106,44 +119,59 @@ static int __init ramoops_probe(struct platform_device *pdev)
 	struct ramoops_context *cxt = &oops_cxt;
 	int err = -EINVAL;
 
-	if (pdata) {
-		mem_size = pdata->mem_size;
-		mem_address = pdata->mem_address;
+	if (!pdata->mem_size || !pdata->record_size) {
+		pr_err("The memory size and the record size must be "
+			"non-zero\n");
+		goto fail3;
 	}
 
-	if (!mem_size) {
-		printk(KERN_ERR "ramoops: invalid size specification");
+	rounddown_pow_of_two(pdata->mem_size);
+	rounddown_pow_of_two(pdata->record_size);
+
+	/* Check for the minimum memory size */
+	if (pdata->mem_size < MIN_MEM_SIZE &&
+	    pdata->record_size < MIN_MEM_SIZE) {
+		pr_err("memory size too small, minium is %lu\n", MIN_MEM_SIZE);
 		goto fail3;
 	}
 
-	rounddown_pow_of_two(mem_size);
-
-	if (mem_size < RECORD_SIZE) {
-		printk(KERN_ERR "ramoops: size too small");
+	if (pdata->mem_size < pdata->record_size) {
+		pr_err("The memory size must be larger than the "
+			"records size\n");
 		goto fail3;
 	}
 
-	cxt->max_count = mem_size / RECORD_SIZE;
+	cxt->max_count = pdata->mem_size / pdata->record_size;
 	cxt->count = 0;
-	cxt->size = mem_size;
-	cxt->phys_addr = mem_address;
+	cxt->size = pdata->mem_size;
+	cxt->phys_addr = pdata->mem_address;
+	cxt->record_size = pdata->record_size;
+	cxt->dump_oops = pdata->dump_oops;
+	/*
+	 * Update the module parameter variables as well so they are visible
+	 * through /sys/module/ramoops/parameters/
+	 */
+	mem_size = pdata->mem_size;
+	mem_address = pdata->mem_address;
+	record_size = pdata->record_size;
+	dump_oops = pdata->dump_oops;
 
 	if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
-		printk(KERN_ERR "ramoops: request mem region failed");
+		pr_err("request mem region failed\n");
 		err = -EINVAL;
 		goto fail3;
 	}
 
 	cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
 	if (!cxt->virt_addr) {
-		printk(KERN_ERR "ramoops: ioremap failed");
+		pr_err("ioremap failed\n");
 		goto fail2;
 	}
 
 	cxt->dump.dump = ramoops_do_dump;
 	err = kmsg_dump_register(&cxt->dump);
 	if (err) {
-		printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+		pr_err("registering kmsg dumper failed\n");
 		goto fail1;
 	}
 
@@ -162,7 +190,7 @@ static int __exit ramoops_remove(struct platform_device *pdev)
 	struct ramoops_context *cxt = &oops_cxt;
 
 	if (kmsg_dump_unregister(&cxt->dump) < 0)
-		printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+		pr_warn("could not unregister kmsg_dumper\n");
 
 	iounmap(cxt->virt_addr);
 	release_mem_region(cxt->phys_addr, cxt->size);
@@ -179,12 +207,39 @@ static struct platform_driver ramoops_driver = {
 
 static int __init ramoops_init(void)
 {
-	return platform_driver_probe(&ramoops_driver, ramoops_probe);
+	int ret;
+	ret = platform_driver_probe(&ramoops_driver, ramoops_probe);
+	if (ret == -ENODEV) {
+		/*
+		 * If we didn't find a platform device, we use module parameters
+		 * building platform data on the fly.
+		 */
+		pr_info("platform device not found, using module parameters\n");
+		dummy_data = kzalloc(sizeof(struct ramoops_platform_data),
+				     GFP_KERNEL);
+		if (!dummy_data)
+			return -ENOMEM;
+		dummy_data->mem_size = mem_size;
+		dummy_data->mem_address = mem_address;
+		dummy_data->record_size = record_size;
+		dummy_data->dump_oops = dump_oops;
+		dummy = platform_create_bundle(&ramoops_driver, ramoops_probe,
+					       NULL, 0, dummy_data,
					       sizeof(struct ramoops_platform_data));
+
+		if (IS_ERR(dummy))
+			ret = PTR_ERR(dummy);
+		else
+			ret = 0;
+	}
+
+	return ret;
 }
 
 static void __exit ramoops_exit(void)
 {
 	platform_driver_unregister(&ramoops_driver);
+	kfree(dummy_data);
 }
 
 module_init(ramoops_init);
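With the fallback above, ramoops can be configured either through module parameters or, on boards that know their reserved RAM at build time, through platform data. A minimal board-code sketch under that assumption; the addresses and sizes are made up, the field names match the pdata accesses visible in the probe hunk, and the "ramoops" device name is assumed to match the platform driver.

```c
/* Hypothetical board file snippet: register a "ramoops" platform device
 * with platform data instead of relying on module parameters. */
#include <linux/platform_device.h>
#include <linux/ramoops.h>

static struct ramoops_platform_data board_ramoops_data = {
	.mem_address	= 0x8f000000,	/* made-up reserved RAM region */
	.mem_size	= 0x00010000,	/* 64 KiB total */
	.record_size	= 0x00001000,	/* 4 KiB per dump -> 16 records */
	.dump_oops	= 1,
};

static struct platform_device board_ramoops_device = {
	.name	= "ramoops",
	.id	= -1,
	.dev	= {
		.platform_data = &board_ramoops_data,
	},
};

static int __init board_ramoops_register(void)
{
	return platform_device_register(&board_ramoops_device);
}
```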
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 729281961f22..c35a785005b0 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1300,345 +1300,14 @@ ctl_table random_table[] = {
1300}; 1300};
1301#endif /* CONFIG_SYSCTL */ 1301#endif /* CONFIG_SYSCTL */
1302 1302
1303/******************************************************************** 1303static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
1304 *
1305 * Random functions for networking
1306 *
1307 ********************************************************************/
1308
1309/*
1310 * TCP initial sequence number picking. This uses the random number
1311 * generator to pick an initial secret value. This value is hashed
1312 * along with the TCP endpoint information to provide a unique
1313 * starting point for each pair of TCP endpoints. This defeats
1314 * attacks which rely on guessing the initial TCP sequence number.
1315 * This algorithm was suggested by Steve Bellovin.
1316 *
1317 * Using a very strong hash was taking an appreciable amount of the total
1318 * TCP connection establishment time, so this is a weaker hash,
1319 * compensated for by changing the secret periodically.
1320 */
1321
1322/* F, G and H are basic MD4 functions: selection, majority, parity */
1323#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
1324#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
1325#define H(x, y, z) ((x) ^ (y) ^ (z))
1326
1327/*
1328 * The generic round function. The application is so specific that
1329 * we don't bother protecting all the arguments with parens, as is generally
1330 * good macro practice, in favor of extra legibility.
1331 * Rotation is separate from addition to prevent recomputation
1332 */
1333#define ROUND(f, a, b, c, d, x, s) \
1334 (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
1335#define K1 0
1336#define K2 013240474631UL
1337#define K3 015666365641UL
1338
1339#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1340
1341static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
1342{
1343 __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
1344
1345 /* Round 1 */
1346 ROUND(F, a, b, c, d, in[ 0] + K1, 3);
1347 ROUND(F, d, a, b, c, in[ 1] + K1, 7);
1348 ROUND(F, c, d, a, b, in[ 2] + K1, 11);
1349 ROUND(F, b, c, d, a, in[ 3] + K1, 19);
1350 ROUND(F, a, b, c, d, in[ 4] + K1, 3);
1351 ROUND(F, d, a, b, c, in[ 5] + K1, 7);
1352 ROUND(F, c, d, a, b, in[ 6] + K1, 11);
1353 ROUND(F, b, c, d, a, in[ 7] + K1, 19);
1354 ROUND(F, a, b, c, d, in[ 8] + K1, 3);
1355 ROUND(F, d, a, b, c, in[ 9] + K1, 7);
1356 ROUND(F, c, d, a, b, in[10] + K1, 11);
1357 ROUND(F, b, c, d, a, in[11] + K1, 19);
1358
1359 /* Round 2 */
1360 ROUND(G, a, b, c, d, in[ 1] + K2, 3);
1361 ROUND(G, d, a, b, c, in[ 3] + K2, 5);
1362 ROUND(G, c, d, a, b, in[ 5] + K2, 9);
1363 ROUND(G, b, c, d, a, in[ 7] + K2, 13);
1364 ROUND(G, a, b, c, d, in[ 9] + K2, 3);
1365 ROUND(G, d, a, b, c, in[11] + K2, 5);
1366 ROUND(G, c, d, a, b, in[ 0] + K2, 9);
1367 ROUND(G, b, c, d, a, in[ 2] + K2, 13);
1368 ROUND(G, a, b, c, d, in[ 4] + K2, 3);
1369 ROUND(G, d, a, b, c, in[ 6] + K2, 5);
1370 ROUND(G, c, d, a, b, in[ 8] + K2, 9);
1371 ROUND(G, b, c, d, a, in[10] + K2, 13);
1372
1373 /* Round 3 */
1374 ROUND(H, a, b, c, d, in[ 3] + K3, 3);
1375 ROUND(H, d, a, b, c, in[ 7] + K3, 9);
1376 ROUND(H, c, d, a, b, in[11] + K3, 11);
1377 ROUND(H, b, c, d, a, in[ 2] + K3, 15);
1378 ROUND(H, a, b, c, d, in[ 6] + K3, 3);
1379 ROUND(H, d, a, b, c, in[10] + K3, 9);
1380 ROUND(H, c, d, a, b, in[ 1] + K3, 11);
1381 ROUND(H, b, c, d, a, in[ 5] + K3, 15);
1382 ROUND(H, a, b, c, d, in[ 9] + K3, 3);
1383 ROUND(H, d, a, b, c, in[ 0] + K3, 9);
1384 ROUND(H, c, d, a, b, in[ 4] + K3, 11);
1385 ROUND(H, b, c, d, a, in[ 8] + K3, 15);
1386
1387 return buf[1] + b; /* "most hashed" word */
1388 /* Alternative: return sum of all words? */
1389}
1390#endif
1391
1392#undef ROUND
1393#undef F
1394#undef G
1395#undef H
1396#undef K1
1397#undef K2
1398#undef K3
1399
1400/* This should not be decreased so low that ISNs wrap too fast. */
1401#define REKEY_INTERVAL (300 * HZ)
1402/*
1403 * Bit layout of the tcp sequence numbers (before adding current time):
1404 * bit 24-31: increased after every key exchange
1405 * bit 0-23: hash(source,dest)
1406 *
1407 * The implementation is similar to the algorithm described
1408 * in the Appendix of RFC 1185, except that
1409 * - it uses a 1 MHz clock instead of a 250 kHz clock
1410 * - it performs a rekey every 5 minutes, which is equivalent
1411 * to a (source,dest) tulple dependent forward jump of the
1412 * clock by 0..2^(HASH_BITS+1)
1413 *
1414 * Thus the average ISN wraparound time is 68 minutes instead of
1415 * 4.55 hours.
1416 *
1417 * SMP cleanup and lock avoidance with poor man's RCU.
1418 * Manfred Spraul <manfred@colorfullife.com>
1419 *
1420 */
1421#define COUNT_BITS 8
1422#define COUNT_MASK ((1 << COUNT_BITS) - 1)
1423#define HASH_BITS 24
1424#define HASH_MASK ((1 << HASH_BITS) - 1)
1425 1304
1426static struct keydata { 1305static int __init random_int_secret_init(void)
1427 __u32 count; /* already shifted to the final position */
1428 __u32 secret[12];
1429} ____cacheline_aligned ip_keydata[2];
1430
1431static unsigned int ip_cnt;
1432
1433static void rekey_seq_generator(struct work_struct *work);
1434
1435static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
1436
1437/*
1438 * Lock avoidance:
1439 * The ISN generation runs lockless - it's just a hash over random data.
1440 * State changes happen every 5 minutes when the random key is replaced.
1441 * Synchronization is performed by having two copies of the hash function
1442 * state and rekey_seq_generator always updates the inactive copy.
1443 * The copy is then activated by updating ip_cnt.
1444 * The implementation breaks down if someone blocks the thread
1445 * that processes SYN requests for more than 5 minutes. Should never
1446 * happen, and even if that happens only a not perfectly compliant
1447 * ISN is generated, nothing fatal.
1448 */
1449static void rekey_seq_generator(struct work_struct *work)
1450{ 1306{
1451 struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; 1307 get_random_bytes(random_int_secret, sizeof(random_int_secret));
1452
1453 get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
1454 keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
1455 smp_wmb();
1456 ip_cnt++;
1457 schedule_delayed_work(&rekey_work,
1458 round_jiffies_relative(REKEY_INTERVAL));
1459}
1460
1461static inline struct keydata *get_keyptr(void)
1462{
1463 struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
1464
1465 smp_rmb();
1466
1467 return keyptr;
1468}
1469
1470static __init int seqgen_init(void)
1471{
1472 rekey_seq_generator(NULL);
1473 return 0; 1308 return 0;
1474} 1309}
1475late_initcall(seqgen_init); 1310late_initcall(random_int_secret_init);
1476
1477#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1478__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
1479 __be16 sport, __be16 dport)
1480{
1481 __u32 seq;
1482 __u32 hash[12];
1483 struct keydata *keyptr = get_keyptr();
1484
1485 /* The procedure is the same as for IPv4, but addresses are longer.
1486 * Thus we must use twothirdsMD4Transform.
1487 */
1488
1489 memcpy(hash, saddr, 16);
1490 hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
1491 memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
1492
1493 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
1494 seq += keyptr->count;
1495
1496 seq += ktime_to_ns(ktime_get_real());
1497
1498 return seq;
1499}
1500EXPORT_SYMBOL(secure_tcpv6_sequence_number);
1501#endif
1502
1503/* The code below is shamelessly stolen from secure_tcp_sequence_number().
1504 * All blames to Andrey V. Savochkin <saw@msu.ru>.
1505 */
1506__u32 secure_ip_id(__be32 daddr)
1507{
1508 struct keydata *keyptr;
1509 __u32 hash[4];
1510
1511 keyptr = get_keyptr();
1512
1513 /*
1514 * Pick a unique starting offset for each IP destination.
1515 * The dest ip address is placed in the starting vector,
1516 * which is then hashed with random data.
1517 */
1518 hash[0] = (__force __u32)daddr;
1519 hash[1] = keyptr->secret[9];
1520 hash[2] = keyptr->secret[10];
1521 hash[3] = keyptr->secret[11];
1522
1523 return half_md4_transform(hash, keyptr->secret);
1524}
1525
1526__u32 secure_ipv6_id(const __be32 daddr[4])
1527{
1528 const struct keydata *keyptr;
1529 __u32 hash[4];
1530
1531 keyptr = get_keyptr();
1532
1533 hash[0] = (__force __u32)daddr[0];
1534 hash[1] = (__force __u32)daddr[1];
1535 hash[2] = (__force __u32)daddr[2];
1536 hash[3] = (__force __u32)daddr[3];
1537
1538 return half_md4_transform(hash, keyptr->secret);
1539}
1540
1541#ifdef CONFIG_INET
1542
1543__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1544 __be16 sport, __be16 dport)
1545{
1546 __u32 seq;
1547 __u32 hash[4];
1548 struct keydata *keyptr = get_keyptr();
1549
1550 /*
1551 * Pick a unique starting offset for each TCP connection endpoints
1552 * (saddr, daddr, sport, dport).
1553 * Note that the words are placed into the starting vector, which is
1554 * then mixed with a partial MD4 over random data.
1555 */
1556 hash[0] = (__force u32)saddr;
1557 hash[1] = (__force u32)daddr;
1558 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
1559 hash[3] = keyptr->secret[11];
1560
1561 seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
1562 seq += keyptr->count;
1563 /*
1564 * As close as possible to RFC 793, which
1565 * suggests using a 250 kHz clock.
1566 * Further reading shows this assumes 2 Mb/s networks.
1567 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
1568 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
1569 * we also need to limit the resolution so that the u32 seq
1570 * overlaps less than one time per MSL (2 minutes).
1571 * Choosing a clock of 64 ns period is OK. (period of 274 s)
1572 */
1573 seq += ktime_to_ns(ktime_get_real()) >> 6;
1574
1575 return seq;
1576}
1577
1578/* Generate secure starting point for ephemeral IPV4 transport port search */
1579u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
1580{
1581 struct keydata *keyptr = get_keyptr();
1582 u32 hash[4];
1583
1584 /*
1585 * Pick a unique starting offset for each ephemeral port search
1586 * (saddr, daddr, dport) and 48bits of random data.
1587 */
1588 hash[0] = (__force u32)saddr;
1589 hash[1] = (__force u32)daddr;
1590 hash[2] = (__force u32)dport ^ keyptr->secret[10];
1591 hash[3] = keyptr->secret[11];
1592
1593 return half_md4_transform(hash, keyptr->secret);
1594}
1595EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
1596
1597#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1598u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
1599 __be16 dport)
1600{
1601 struct keydata *keyptr = get_keyptr();
1602 u32 hash[12];
1603
1604 memcpy(hash, saddr, 16);
1605 hash[4] = (__force u32)dport;
1606 memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
1607
1608 return twothirdsMD4Transform((const __u32 *)daddr, hash);
1609}
1610#endif
1611
1612#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
1613/* Similar to secure_tcp_sequence_number but generate a 48 bit value
1614 * bit's 32-47 increase every key exchange
1615 * 0-31 hash(source, dest)
1616 */
1617u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
1618 __be16 sport, __be16 dport)
1619{
1620 u64 seq;
1621 __u32 hash[4];
1622 struct keydata *keyptr = get_keyptr();
1623
1624 hash[0] = (__force u32)saddr;
1625 hash[1] = (__force u32)daddr;
1626 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
1627 hash[3] = keyptr->secret[11];
1628
1629 seq = half_md4_transform(hash, keyptr->secret);
1630 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
1631
1632 seq += ktime_to_ns(ktime_get_real());
1633 seq &= (1ull << 48) - 1;
1634
1635 return seq;
1636}
1637EXPORT_SYMBOL(secure_dccp_sequence_number);
1638#endif
1639
1640#endif /* CONFIG_INET */
1641
1642 1311
1643/* 1312/*
1644 * Get a random word for internal kernel use only. Similar to urandom but 1313 * Get a random word for internal kernel use only. Similar to urandom but
@@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
  * value is not cryptographically secure but for several uses the cost of
  * depleting entropy is too high
  */
-DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
+DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
 unsigned int get_random_int(void)
 {
-	struct keydata *keyptr;
 	__u32 *hash = get_cpu_var(get_random_int_hash);
-	int ret;
+	unsigned int ret;
 
-	keyptr = get_keyptr();
 	hash[0] += current->pid + jiffies + get_cycles();
-
-	ret = half_md4_transform(hash, keyptr->secret);
+	md5_transform(hash, random_int_secret);
+	ret = hash[0];
 	put_cpu_var(get_random_int_hash);
 
 	return ret;
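After this change get_random_int() mixes a per-CPU MD5 state, seeded once at boot from the pool by random_int_secret_init(), with the caller's pid, jiffies and cycle counter; it is cheap but, as the comment says, not cryptographically secure. A hedged sketch of a typical in-kernel use, bounding the value to a range (the helper name is invented for illustration):

```c
/* Illustrative only: using get_random_int() for a non-cryptographic
 * purpose such as jittering a retry interval. */
#include <linux/random.h>
#include <linux/jiffies.h>

static unsigned long example_jittered_deadline(unsigned long base_jiffies)
{
	/* add up to ~25% random slack so peers do not retry in lock-step */
	unsigned int slack = get_random_int() % (base_jiffies / 4 + 1);

	return jiffies + base_jiffies + slack;
}
```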
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
new file mode 100644
index 000000000000..cf3ee008dca2
--- /dev/null
+++ b/drivers/char/tile-srom.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * SPI Flash ROM driver
15 *
16 * This source code is derived from code provided in "Linux Device
17 * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and
18 * Greg Kroah-Hartman, published by O'Reilly Media, Inc.
19 */
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/init.h>
24#include <linux/kernel.h> /* printk() */
25#include <linux/slab.h> /* kmalloc() */
26#include <linux/fs.h> /* everything... */
27#include <linux/errno.h> /* error codes */
28#include <linux/types.h> /* size_t */
29#include <linux/proc_fs.h>
30#include <linux/fcntl.h> /* O_ACCMODE */
31#include <linux/aio.h>
32#include <linux/pagemap.h>
33#include <linux/hugetlb.h>
34#include <linux/uaccess.h>
35#include <linux/platform_device.h>
36#include <hv/hypervisor.h>
37#include <linux/ioctl.h>
38#include <linux/cdev.h>
39#include <linux/delay.h>
40#include <hv/drv_srom_intf.h>
41
42/*
43 * Size of our hypervisor I/O requests. We break up large transfers
44 * so that we don't spend large uninterrupted spans of time in the
45 * hypervisor. Erasing an SROM sector takes a significant fraction of
46 * a second, so if we allowed the user to, say, do one I/O to write the
47 * entire ROM, we'd get soft lockup timeouts, or worse.
48 */
49#define SROM_CHUNK_SIZE ((size_t)4096)
50
51/*
52 * When hypervisor is busy (e.g. erasing), poll the status periodically.
53 */
54
55/*
56 * Interval to poll the state in msec
57 */
58#define SROM_WAIT_TRY_INTERVAL 20
59
60/*
61 * Maximum times to poll the state
62 */
63#define SROM_MAX_WAIT_TRY_TIMES 1000
64
65struct srom_dev {
66 int hv_devhdl; /* Handle for hypervisor device */
67 u32 total_size; /* Size of this device */
68 u32 sector_size; /* Size of a sector */
69 u32 page_size; /* Size of a page */
70 struct mutex lock; /* Allow only one accessor at a time */
71};
72
73static int srom_major; /* Dynamic major by default */
74module_param(srom_major, int, 0);
75MODULE_AUTHOR("Tilera Corporation");
76MODULE_LICENSE("GPL");
77
78static int srom_devs; /* Number of SROM partitions */
79static struct cdev srom_cdev;
80static struct class *srom_class;
81static struct srom_dev *srom_devices;
82
83/*
84 * Handle calling the hypervisor and managing EAGAIN/EBUSY.
85 */
86
87static ssize_t _srom_read(int hv_devhdl, void *buf,
88 loff_t off, size_t count)
89{
90 int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
91 for (;;) {
92 retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf,
93 count, off);
94 if (retval >= 0)
95 return retval;
96 if (retval == HV_EAGAIN)
97 continue;
98 if (retval == HV_EBUSY && --retries > 0) {
99 msleep(SROM_WAIT_TRY_INTERVAL);
100 continue;
101 }
102 pr_err("_srom_read: error %d\n", retval);
103 return -EIO;
104 }
105}
106
107static ssize_t _srom_write(int hv_devhdl, const void *buf,
108 loff_t off, size_t count)
109{
110 int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
111 for (;;) {
112 retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf,
113 count, off);
114 if (retval >= 0)
115 return retval;
116 if (retval == HV_EAGAIN)
117 continue;
118 if (retval == HV_EBUSY && --retries > 0) {
119 msleep(SROM_WAIT_TRY_INTERVAL);
120 continue;
121 }
122 pr_err("_srom_write: error %d\n", retval);
123 return -EIO;
124 }
125}
126
127/**
128 * srom_open() - Device open routine.
129 * @inode: Inode for this device.
130 * @filp: File for this specific open of the device.
131 *
132 * Returns zero, or an error code.
133 */
134static int srom_open(struct inode *inode, struct file *filp)
135{
136 filp->private_data = &srom_devices[iminor(inode)];
137 return 0;
138}
139
140
141/**
142 * srom_release() - Device release routine.
143 * @inode: Inode for this device.
144 * @filp: File for this specific open of the device.
145 *
146 * Returns zero, or an error code.
147 */
148static int srom_release(struct inode *inode, struct file *filp)
149{
150 struct srom_dev *srom = filp->private_data;
151 char dummy;
152
153 /* Make sure we've flushed anything written to the ROM. */
154 mutex_lock(&srom->lock);
155 if (srom->hv_devhdl >= 0)
156 _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1);
157 mutex_unlock(&srom->lock);
158
159 filp->private_data = NULL;
160
161 return 0;
162}
163
164
165/**
166 * srom_read() - Read data from the device.
167 * @filp: File for this specific open of the device.
168 * @buf: User's data buffer.
169 * @count: Number of bytes requested.
170 * @f_pos: File position.
171 *
172 * Returns number of bytes read, or an error code.
173 */
174static ssize_t srom_read(struct file *filp, char __user *buf,
175 size_t count, loff_t *f_pos)
176{
177 int retval = 0;
178 void *kernbuf;
179 struct srom_dev *srom = filp->private_data;
180
181 kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
182 if (!kernbuf)
183 return -ENOMEM;
184
185 if (mutex_lock_interruptible(&srom->lock)) {
186 retval = -ERESTARTSYS;
187 kfree(kernbuf);
188 return retval;
189 }
190
191 while (count) {
192 int hv_retval;
193 int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
194
195 hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
196 *f_pos, bytes_this_pass);
197 if (hv_retval > 0) {
198 if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
199 retval = -EFAULT;
200 break;
201 }
202 } else if (hv_retval <= 0) {
203 if (retval == 0)
204 retval = hv_retval;
205 break;
206 }
207
208 retval += hv_retval;
209 *f_pos += hv_retval;
210 buf += hv_retval;
211 count -= hv_retval;
212 }
213
214 mutex_unlock(&srom->lock);
215 kfree(kernbuf);
216
217 return retval;
218}
219
220/**
221 * srom_write() - Write data to the device.
222 * @filp: File for this specific open of the device.
223 * @buf: User's data buffer.
224 * @count: Number of bytes requested.
225 * @f_pos: File position.
226 *
227 * Returns number of bytes written, or an error code.
228 */
229static ssize_t srom_write(struct file *filp, const char __user *buf,
230 size_t count, loff_t *f_pos)
231{
232 int retval = 0;
233 void *kernbuf;
234 struct srom_dev *srom = filp->private_data;
235
236 kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
237 if (!kernbuf)
238 return -ENOMEM;
239
240 if (mutex_lock_interruptible(&srom->lock)) {
241 retval = -ERESTARTSYS;
242 kfree(kernbuf);
243 return retval;
244 }
245
246 while (count) {
247 int hv_retval;
248 int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
249
250 if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) {
251 retval = -EFAULT;
252 break;
253 }
254
255 hv_retval = _srom_write(srom->hv_devhdl, kernbuf,
256 *f_pos, bytes_this_pass);
257 if (hv_retval <= 0) {
258 if (retval == 0)
259 retval = hv_retval;
260 break;
261 }
262
263 retval += hv_retval;
264 *f_pos += hv_retval;
265 buf += hv_retval;
266 count -= hv_retval;
267 }
268
269 mutex_unlock(&srom->lock);
270 kfree(kernbuf);
271
272 return retval;
273}
274
275/* Provide our own implementation so we can use srom->total_size. */
276loff_t srom_llseek(struct file *filp, loff_t offset, int origin)
277{
278 struct srom_dev *srom = filp->private_data;
279
280 if (mutex_lock_interruptible(&srom->lock))
281 return -ERESTARTSYS;
282
283 switch (origin) {
284 case SEEK_END:
285 offset += srom->total_size;
286 break;
287 case SEEK_CUR:
288 offset += filp->f_pos;
289 break;
290 }
291
292 if (offset < 0 || offset > srom->total_size) {
293 offset = -EINVAL;
294 } else {
295 filp->f_pos = offset;
296 filp->f_version = 0;
297 }
298
299 mutex_unlock(&srom->lock);
300
301 return offset;
302}
303
304static ssize_t total_show(struct device *dev,
305 struct device_attribute *attr, char *buf)
306{
307 struct srom_dev *srom = dev_get_drvdata(dev);
308 return sprintf(buf, "%u\n", srom->total_size);
309}
310
311static ssize_t sector_show(struct device *dev,
312 struct device_attribute *attr, char *buf)
313{
314 struct srom_dev *srom = dev_get_drvdata(dev);
315 return sprintf(buf, "%u\n", srom->sector_size);
316}
317
318static ssize_t page_show(struct device *dev,
319 struct device_attribute *attr, char *buf)
320{
321 struct srom_dev *srom = dev_get_drvdata(dev);
322 return sprintf(buf, "%u\n", srom->page_size);
323}
324
325static struct device_attribute srom_dev_attrs[] = {
326 __ATTR(total_size, S_IRUGO, total_show, NULL),
327 __ATTR(sector_size, S_IRUGO, sector_show, NULL),
328 __ATTR(page_size, S_IRUGO, page_show, NULL),
329 __ATTR_NULL
330};
331
332static char *srom_devnode(struct device *dev, mode_t *mode)
333{
334 *mode = S_IRUGO | S_IWUSR;
335 return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
336}
337
338/*
339 * The fops
340 */
341static const struct file_operations srom_fops = {
342 .owner = THIS_MODULE,
343 .llseek = srom_llseek,
344 .read = srom_read,
345 .write = srom_write,
346 .open = srom_open,
347 .release = srom_release,
348};
349
350/**
351 * srom_setup_minor() - Initialize per-minor information.
352 * @srom: Per-device SROM state.
353 * @index: Device to set up.
354 */
355static int srom_setup_minor(struct srom_dev *srom, int index)
356{
357 struct device *dev;
358 int devhdl = srom->hv_devhdl;
359
360 mutex_init(&srom->lock);
361
362 if (_srom_read(devhdl, &srom->total_size,
363 SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0)
364 return -EIO;
365 if (_srom_read(devhdl, &srom->sector_size,
366 SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0)
367 return -EIO;
368 if (_srom_read(devhdl, &srom->page_size,
369 SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
370 return -EIO;
371
372 dev = device_create(srom_class, &platform_bus,
373 MKDEV(srom_major, index), srom, "%d", index);
374 return IS_ERR(dev) ? PTR_ERR(dev) : 0;
375}
376
377/** srom_init() - Initialize the driver's module. */
378static int srom_init(void)
379{
380 int result, i;
381 dev_t dev = MKDEV(srom_major, 0);
382
383 /*
384 * Start with a plausible number of partitions; the krealloc() call
385 * below will yield about log(srom_devs) additional allocations.
386 */
387 srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
388
389 /* Discover the number of srom partitions. */
390 for (i = 0; ; i++) {
391 int devhdl;
392 char buf[20];
393 struct srom_dev *new_srom_devices =
394 krealloc(srom_devices, (i+1) * sizeof(struct srom_dev),
395 GFP_KERNEL | __GFP_ZERO);
396 if (!new_srom_devices) {
397 result = -ENOMEM;
398 goto fail_mem;
399 }
400 srom_devices = new_srom_devices;
401 sprintf(buf, "srom/0/%d", i);
402 devhdl = hv_dev_open((HV_VirtAddr)buf, 0);
403 if (devhdl < 0) {
404 if (devhdl != HV_ENODEV)
405 pr_notice("srom/%d: hv_dev_open failed: %d.\n",
406 i, devhdl);
407 break;
408 }
409 srom_devices[i].hv_devhdl = devhdl;
410 }
411 srom_devs = i;
412
413 /* Bail out early if we have no partitions at all. */
414 if (srom_devs == 0) {
415 result = -ENODEV;
416 goto fail_mem;
417 }
418
419 /* Register our major, and accept a dynamic number. */
420 if (srom_major)
421 result = register_chrdev_region(dev, srom_devs, "srom");
422 else {
423 result = alloc_chrdev_region(&dev, 0, srom_devs, "srom");
424 srom_major = MAJOR(dev);
425 }
426 if (result < 0)
427 goto fail_mem;
428
429 /* Register a character device. */
430 cdev_init(&srom_cdev, &srom_fops);
431 srom_cdev.owner = THIS_MODULE;
432 srom_cdev.ops = &srom_fops;
433 result = cdev_add(&srom_cdev, dev, srom_devs);
434 if (result < 0)
435 goto fail_chrdev;
436
437 /* Create a sysfs class. */
438 srom_class = class_create(THIS_MODULE, "srom");
439 if (IS_ERR(srom_class)) {
440 result = PTR_ERR(srom_class);
441 goto fail_cdev;
442 }
443 srom_class->dev_attrs = srom_dev_attrs;
444 srom_class->devnode = srom_devnode;
445
446 /* Do per-partition initialization */
447 for (i = 0; i < srom_devs; i++) {
448 result = srom_setup_minor(srom_devices + i, i);
449 if (result < 0)
450 goto fail_class;
451 }
452
453 return 0;
454
455fail_class:
456 for (i = 0; i < srom_devs; i++)
457 device_destroy(srom_class, MKDEV(srom_major, i));
458 class_destroy(srom_class);
459fail_cdev:
460 cdev_del(&srom_cdev);
461fail_chrdev:
462 unregister_chrdev_region(dev, srom_devs);
463fail_mem:
464 kfree(srom_devices);
465 return result;
466}
467
468/** srom_cleanup() - Clean up the driver's module. */
469static void srom_cleanup(void)
470{
471 int i;
472 for (i = 0; i < srom_devs; i++)
473 device_destroy(srom_class, MKDEV(srom_major, i));
474 class_destroy(srom_class);
475 cdev_del(&srom_cdev);
476 unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
477 kfree(srom_devices);
478}
479
480module_init(srom_init);
481module_exit(srom_cleanup);
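Because srom_llseek() accepts SEEK_END against total_size, user space can discover a partition's size with lseek() alone, and the per-device sysfs attributes (total_size, sector_size, page_size) expose the same geometry. A small user-space sketch; the /dev/srom/0 node and the /sys/class/srom/0/ attribute paths are inferred from the devnode and device_create() calls above, not guaranteed by the patch.

```c
/* Hypothetical user-space probe of SROM partition 0. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t size;
	FILE *f;
	unsigned int sector = 0;
	int fd = open("/dev/srom/0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/srom/0");
		return 1;
	}
	size = lseek(fd, 0, SEEK_END);	/* srom_llseek() clamps to total_size */
	close(fd);

	f = fopen("/sys/class/srom/0/sector_size", "r");
	if (f) {
		if (fscanf(f, "%u", &sector) != 1)
			sector = 0;
		fclose(f);
	}
	printf("partition 0: %lld bytes total, %u-byte sectors\n",
	       (long long)size, sector);
	return 0;
}
```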
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 7beb0e25f1e1..caf8012ef47c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -534,6 +534,7 @@ void tpm_get_timeouts(struct tpm_chip *chip)
 	struct duration_t *duration_cap;
 	ssize_t rc;
 	u32 timeout;
+	unsigned int scale = 1;
 
 	tpm_cmd.header.in = tpm_getcap_header;
 	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
@@ -545,24 +546,30 @@ void tpm_get_timeouts(struct tpm_chip *chip)
 	if (rc)
 		goto duration;
 
-	if (be32_to_cpu(tpm_cmd.header.out.length)
-	    != 4 * sizeof(u32))
-		goto duration;
+	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+	    be32_to_cpu(tpm_cmd.header.out.length)
+	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+		return;
 
 	timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
 	/* Don't overwrite default if value is 0 */
 	timeout = be32_to_cpu(timeout_cap->a);
+	if (timeout && timeout < 1000) {
+		/* timeouts in msec rather usec */
+		scale = 1000;
+		chip->vendor.timeout_adjusted = true;
+	}
 	if (timeout)
-		chip->vendor.timeout_a = usecs_to_jiffies(timeout);
+		chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
 	timeout = be32_to_cpu(timeout_cap->b);
 	if (timeout)
-		chip->vendor.timeout_b = usecs_to_jiffies(timeout);
+		chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
 	timeout = be32_to_cpu(timeout_cap->c);
 	if (timeout)
-		chip->vendor.timeout_c = usecs_to_jiffies(timeout);
+		chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
 	timeout = be32_to_cpu(timeout_cap->d);
 	if (timeout)
-		chip->vendor.timeout_d = usecs_to_jiffies(timeout);
+		chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
 
 duration:
 	tpm_cmd.header.in = tpm_getcap_header;
@@ -575,23 +582,31 @@ duration:
 	if (rc)
 		return;
 
-	if (be32_to_cpu(tpm_cmd.header.out.return_code)
-	    != 3 * sizeof(u32))
+	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+	    be32_to_cpu(tpm_cmd.header.out.length)
+	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
 		return;
+
 	duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
 	chip->vendor.duration[TPM_SHORT] =
 		usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+	chip->vendor.duration[TPM_MEDIUM] =
+		usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+	chip->vendor.duration[TPM_LONG] =
+		usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+
 	/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
 	 * value wrong and apparently reports msecs rather than usecs. So we
 	 * fix up the resulting too-small TPM_SHORT value to make things work.
+	 * We also scale the TPM_MEDIUM and -_LONG values by 1000.
 	 */
-	if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
+	if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
 		chip->vendor.duration[TPM_SHORT] = HZ;
-
-	chip->vendor.duration[TPM_MEDIUM] =
-		usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
-	chip->vendor.duration[TPM_LONG] =
-		usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+		chip->vendor.duration[TPM_MEDIUM] *= 1000;
+		chip->vendor.duration[TPM_LONG] *= 1000;
+		chip->vendor.duration_adjusted = true;
+		dev_info(chip->dev, "Adjusting TPM timeout parameters.");
+	}
 }
 EXPORT_SYMBOL_GPL(tpm_get_timeouts);
 
@@ -600,7 +615,7 @@ void tpm_continue_selftest(struct tpm_chip *chip)
 	u8 data[] = {
 		0, 193,			/* TPM_TAG_RQU_COMMAND */
 		0, 0, 0, 10,		/* length */
-		0, 0, 0, 83,		/* TPM_ORD_GetCapability */
+		0, 0, 0, 83,		/* TPM_ORD_ContinueSelfTest */
 	};
 
 	tpm_transmit(chip, data, sizeof(data));
@@ -863,18 +878,24 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
 	data = tpm_cmd.params.readpubek_out_buffer;
 	str +=
 	    sprintf(str,
-		    "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
-		    "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
-		    " %02X %02X %02X %02X %02X %02X %02X %02X\n"
-		    "Modulus length: %d\nModulus: \n",
-		    data[10], data[11], data[12], data[13], data[14],
-		    data[15], data[16], data[17], data[22], data[23],
-		    data[24], data[25], data[26], data[27], data[28],
-		    data[29], data[30], data[31], data[32], data[33],
-		    be32_to_cpu(*((__be32 *) (data + 34))));
+		    "Algorithm: %02X %02X %02X %02X\n"
+		    "Encscheme: %02X %02X\n"
+		    "Sigscheme: %02X %02X\n"
+		    "Parameters: %02X %02X %02X %02X "
+		    "%02X %02X %02X %02X "
+		    "%02X %02X %02X %02X\n"
+		    "Modulus length: %d\n"
+		    "Modulus:\n",
+		    data[0], data[1], data[2], data[3],
+		    data[4], data[5],
+		    data[6], data[7],
+		    data[12], data[13], data[14], data[15],
+		    data[16], data[17], data[18], data[19],
+		    data[20], data[21], data[22], data[23],
+		    be32_to_cpu(*((__be32 *) (data + 24))));
 
 	for (i = 0; i < 256; i++) {
-		str += sprintf(str, "%02X ", data[i + 38]);
+		str += sprintf(str, "%02X ", data[i + 28]);
 		if ((i + 1) % 16 == 0)
 			str += sprintf(str, "\n");
 	}
@@ -937,6 +958,35 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
 }
 EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
 
+ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d %d %d [%s]\n",
+		       jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+		       jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+		       jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
+		       chip->vendor.duration_adjusted
+		       ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_durations);
+
+ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d %d %d %d [%s]\n",
+		       jiffies_to_usecs(chip->vendor.timeout_a),
+		       jiffies_to_usecs(chip->vendor.timeout_b),
+		       jiffies_to_usecs(chip->vendor.timeout_c),
+		       jiffies_to_usecs(chip->vendor.timeout_d),
+		       chip->vendor.timeout_adjusted
+		       ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_timeouts);
+
 ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
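The timeout hunk above works around chips that report TPM_CAP_PROP timeouts in milliseconds instead of the microseconds the interface expects: any non-zero value below 1000 is treated as msec and multiplied by 1000 before usecs_to_jiffies(). As a worked example, a reported timeout_a of 750 becomes 750 * 1000 = 750000 µs (0.75 s) rather than an unusable 750 µs. A compact restatement of that heuristic as a standalone sketch, not the driver code itself:

```c
/* Sketch of the msec-vs-usec heuristic applied to one reported timeout.
 * Values >= 1000 are assumed to already be in microseconds. */
#include <linux/types.h>

static unsigned long tpm_timeout_to_usecs(u32 reported)
{
	unsigned int scale = 1;

	if (reported && reported < 1000)
		scale = 1000;	/* firmware answered in milliseconds */

	return (unsigned long)reported * scale;
}
```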
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb031b69a..9c4163cfa3ce 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,10 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
 			  char *);
 extern ssize_t tpm_show_temp_deactivated(struct device *,
 					 struct device_attribute *attr, char *);
+extern ssize_t tpm_show_durations(struct device *,
+				  struct device_attribute *attr, char *);
+extern ssize_t tpm_show_timeouts(struct device *,
+				 struct device_attribute *attr, char *);
 
 struct tpm_chip;
 
@@ -67,6 +71,7 @@ struct tpm_vendor_specific {
 	unsigned long base;		/* TPM base address */
 
 	int irq;
+	int probed_irq;
 
 	int region_size;
 	int have_region;
@@ -81,7 +86,9 @@ struct tpm_vendor_specific {
 	struct list_head list;
 	int locality;
 	unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
+	bool timeout_adjusted;
 	unsigned long duration[3]; /* jiffies */
+	bool duration_adjusted;
 
 	wait_queue_head_t read_queue;
 	wait_queue_head_t int_queue;
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index a605cb7dd898..82facc9104c7 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -330,12 +330,12 @@ static int __init init_nsc(void)
 	pdev->dev.driver = &nsc_drv.driver;
 	pdev->dev.release = tpm_nsc_remove;
 
-	if ((rc = platform_device_register(pdev)) < 0)
-		goto err_free_dev;
+	if ((rc = platform_device_add(pdev)) < 0)
+		goto err_put_dev;
 
 	if (request_region(base, 2, "tpm_nsc0") == NULL ) {
 		rc = -EBUSY;
-		goto err_unreg_dev;
+		goto err_del_dev;
 	}
 
 	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
@@ -382,10 +382,10 @@ static int __init init_nsc(void)
 
 err_rel_reg:
 	release_region(base, 2);
-err_unreg_dev:
-	platform_device_unregister(pdev);
-err_free_dev:
-	kfree(pdev);
+err_del_dev:
+	platform_device_del(pdev);
+err_put_dev:
+	platform_device_put(pdev);
 err_unreg_drv:
 	platform_driver_unregister(&nsc_drv);
 	return rc;
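The tpm_nsc hunk moves to the split add/del lifecycle, where a device that failed to register (or was deleted) must be released with platform_device_put() so its release callback and refcount are honoured, rather than being freed directly. A generic sketch of that pattern, written here under the assumption that the device is obtained with platform_device_alloc(); it is not the tpm_nsc code itself, which sets up more fields before adding.

```c
/* Illustrative platform_device alloc/add/put lifecycle. */
#include <linux/platform_device.h>

static int example_register(struct platform_device **out)
{
	struct platform_device *pdev;
	int rc;

	pdev = platform_device_alloc("example", -1);
	if (!pdev)
		return -ENOMEM;

	rc = platform_device_add(pdev);
	if (rc) {
		platform_device_put(pdev);	/* not kfree(): drop the reference */
		return rc;
	}

	*out = pdev;	/* later teardown: platform_device_unregister(pdev) */
	return 0;
}
```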
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index dd21df55689d..3f4051a7c5a7 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/wait.h> 27#include <linux/wait.h>
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#include <linux/freezer.h>
29#include "tpm.h" 30#include "tpm.h"
30 31
31#define TPM_HEADER_SIZE 10 32#define TPM_HEADER_SIZE 10
@@ -79,7 +80,7 @@ enum tis_defaults {
79static LIST_HEAD(tis_chips); 80static LIST_HEAD(tis_chips);
80static DEFINE_SPINLOCK(tis_lock); 81static DEFINE_SPINLOCK(tis_lock);
81 82
82#ifdef CONFIG_ACPI 83#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
83static int is_itpm(struct pnp_dev *dev) 84static int is_itpm(struct pnp_dev *dev)
84{ 85{
85 struct acpi_device *acpi = pnp_acpi_device(dev); 86 struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -93,7 +94,7 @@ static int is_itpm(struct pnp_dev *dev)
93 return 0; 94 return 0;
94} 95}
95#else 96#else
96static int is_itpm(struct pnp_dev *dev) 97static inline int is_itpm(struct pnp_dev *dev)
97{ 98{
98 return 0; 99 return 0;
99} 100}
@@ -120,7 +121,7 @@ static void release_locality(struct tpm_chip *chip, int l, int force)
120 121
121static int request_locality(struct tpm_chip *chip, int l) 122static int request_locality(struct tpm_chip *chip, int l)
122{ 123{
123 unsigned long stop; 124 unsigned long stop, timeout;
124 long rc; 125 long rc;
125 126
126 if (check_locality(chip, l) >= 0) 127 if (check_locality(chip, l) >= 0)
@@ -129,17 +130,25 @@ static int request_locality(struct tpm_chip *chip, int l)
129 iowrite8(TPM_ACCESS_REQUEST_USE, 130 iowrite8(TPM_ACCESS_REQUEST_USE,
130 chip->vendor.iobase + TPM_ACCESS(l)); 131 chip->vendor.iobase + TPM_ACCESS(l));
131 132
133 stop = jiffies + chip->vendor.timeout_a;
134
132 if (chip->vendor.irq) { 135 if (chip->vendor.irq) {
136again:
137 timeout = stop - jiffies;
138 if ((long)timeout <= 0)
139 return -1;
133 rc = wait_event_interruptible_timeout(chip->vendor.int_queue, 140 rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
134 (check_locality 141 (check_locality
135 (chip, l) >= 0), 142 (chip, l) >= 0),
136 chip->vendor.timeout_a); 143 timeout);
137 if (rc > 0) 144 if (rc > 0)
138 return l; 145 return l;
139 146 if (rc == -ERESTARTSYS && freezing(current)) {
147 clear_thread_flag(TIF_SIGPENDING);
148 goto again;
149 }
140 } else { 150 } else {
141 /* wait for burstcount */ 151 /* wait for burstcount */
142 stop = jiffies + chip->vendor.timeout_a;
143 do { 152 do {
144 if (check_locality(chip, l) >= 0) 153 if (check_locality(chip, l) >= 0)
145 return l; 154 return l;
@@ -196,15 +205,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
 	if ((status & mask) == mask)
 		return 0;
 
+	stop = jiffies + timeout;
+
 	if (chip->vendor.irq) {
+again:
+		timeout = stop - jiffies;
+		if ((long)timeout <= 0)
+			return -ETIME;
 		rc = wait_event_interruptible_timeout(*queue,
 						      ((tpm_tis_status
 							(chip) & mask) ==
 						       mask), timeout);
 		if (rc > 0)
 			return 0;
+		if (rc == -ERESTARTSYS && freezing(current)) {
+			clear_thread_flag(TIF_SIGPENDING);
+			goto again;
+		}
 	} else {
-		stop = jiffies + timeout;
 		do {
 			msleep(TPM_TIMEOUT);
 			status = tpm_tis_status(chip);
@@ -288,11 +306,10 @@ MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
  * tpm.c can skip polling for the data to be available as the interrupt is
  * waited for here
  */
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
 {
 	int rc, status, burstcnt;
 	size_t count = 0;
-	u32 ordinal;
 
 	if (request_locality(chip, 0) < 0)
 		return -EBUSY;
@@ -327,8 +344,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
 
 	/* write last byte */
 	iowrite8(buf[count],
-		 chip->vendor.iobase +
-		 TPM_DATA_FIFO(chip->vendor.locality));
+		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
 	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 		      &chip->vendor.int_queue);
 	status = tpm_tis_status(chip);
@@ -337,6 +353,28 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
 		goto out_err;
 	}
 
+	return 0;
+
+out_err:
+	tpm_tis_ready(chip);
+	release_locality(chip, chip->vendor.locality, 0);
+	return rc;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	int rc;
+	u32 ordinal;
+
+	rc = tpm_tis_send_data(chip, buf, len);
+	if (rc < 0)
+		return rc;
+
 	/* go and do it */
 	iowrite8(TPM_STS_GO,
 		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
@@ -358,6 +396,47 @@ out_err:
 	return rc;
 }
 
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending command without itpm flag set and if that
+ * fails, repeat with itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+	int rc = 0;
+	u8 cmd_getticks[] = {
+		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+		0x00, 0x00, 0x00, 0xf1
+	};
+	size_t len = sizeof(cmd_getticks);
+	int rem_itpm = itpm;
+
+	itpm = 0;
+
+	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+	if (rc == 0)
+		goto out;
+
+	tpm_tis_ready(chip);
+	release_locality(chip, chip->vendor.locality, 0);
+
+	itpm = 1;
+
+	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+	if (rc == 0) {
+		dev_info(chip->dev, "Detected an iTPM.\n");
+		rc = 1;
+	} else
+		rc = -EFAULT;
+
+out:
+	itpm = rem_itpm;
+	tpm_tis_ready(chip);
+	release_locality(chip, chip->vendor.locality, 0);
+
+	return rc;
+}
+
 static const struct file_operations tis_ops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -376,6 +455,8 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
 		   NULL);
 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
 
 static struct attribute *tis_attrs[] = {
 	&dev_attr_pubek.attr,
@@ -385,7 +466,9 @@ static struct attribute *tis_attrs[] = {
 	&dev_attr_owned.attr,
 	&dev_attr_temp_deactivated.attr,
 	&dev_attr_caps.attr,
-	&dev_attr_cancel.attr, NULL,
+	&dev_attr_cancel.attr,
+	&dev_attr_durations.attr,
+	&dev_attr_timeouts.attr, NULL,
 };
 
 static struct attribute_group tis_attr_grp = {
@@ -416,7 +499,7 @@ static irqreturn_t tis_int_probe(int irq, void *dev_id)
 	if (interrupt == 0)
 		return IRQ_NONE;
 
-	chip->vendor.irq = irq;
+	chip->vendor.probed_irq = irq;
 
 	/* Clear interrupts handled with TPM_EOI */
 	iowrite32(interrupt,
@@ -464,7 +547,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 			resource_size_t len, unsigned int irq)
 {
 	u32 vendor, intfcaps, intmask;
-	int rc, i;
+	int rc, i, irq_s, irq_e;
 	struct tpm_chip *chip;
 
 	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -493,6 +576,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
493 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 576 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
494 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 577 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
495 578
579 if (!itpm) {
580 itpm = probe_itpm(chip);
581 if (itpm < 0) {
582 rc = -ENODEV;
583 goto out_err;
584 }
585 }
586
496 if (itpm) 587 if (itpm)
497 dev_info(dev, "Intel iTPM workaround enabled\n"); 588 dev_info(dev, "Intel iTPM workaround enabled\n");
498 589
@@ -522,6 +613,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
 		dev_dbg(dev, "\tData Avail Int Support\n");
 
+	/* get the timeouts before testing for irqs */
+	tpm_get_timeouts(chip);
+
 	/* INTERRUPT Setup */
 	init_waitqueue_head(&chip->vendor.read_queue);
 	init_waitqueue_head(&chip->vendor.int_queue);
@@ -540,13 +634,19 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 	if (interrupts)
 		chip->vendor.irq = irq;
 	if (interrupts && !chip->vendor.irq) {
-		chip->vendor.irq =
+		irq_s =
 		    ioread8(chip->vendor.iobase +
 			    TPM_INT_VECTOR(chip->vendor.locality));
+		if (irq_s) {
+			irq_e = irq_s;
+		} else {
+			irq_s = 3;
+			irq_e = 15;
+		}
 
-		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
+		for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
 			iowrite8(i, chip->vendor.iobase +
 				 TPM_INT_VECTOR(chip->vendor.locality));
 			if (request_irq
 			    (i, tis_int_probe, IRQF_SHARED,
 			     chip->vendor.miscdev.name, chip) != 0) {
@@ -568,9 +668,22 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 				 chip->vendor.iobase +
 				 TPM_INT_ENABLE(chip->vendor.locality));
 
+			chip->vendor.probed_irq = 0;
+
 			/* Generate Interrupts */
 			tpm_gen_interrupt(chip);
 
+			chip->vendor.irq = chip->vendor.probed_irq;
+
+			/* free_irq will call into tis_int_probe;
+			   clear all irqs we haven't seen while doing
+			   tpm_gen_interrupt */
+			iowrite32(ioread32
+				  (chip->vendor.iobase +
+				   TPM_INT_STATUS(chip->vendor.locality)),
+				  chip->vendor.iobase +
+				  TPM_INT_STATUS(chip->vendor.locality));
+
 			/* Turn off */
 			iowrite32(intmask,
 				  chip->vendor.iobase +
@@ -609,7 +722,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 	list_add(&chip->vendor.list, &tis_chips);
 	spin_unlock(&tis_lock);
 
-	tpm_get_timeouts(chip);
 	tpm_continue_selftest(chip);
 
 	return 0;
@@ -619,6 +731,29 @@ out_err:
 	tpm_remove_hardware(chip->dev);
 	return rc;
 }
+
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+	u32 intmask;
+
+	/* reenable interrupts that device may have lost or
+	   BIOS/firmware may have disabled */
+	iowrite8(chip->vendor.irq, chip->vendor.iobase +
+		 TPM_INT_VECTOR(chip->vendor.locality));
+
+	intmask =
+	    ioread32(chip->vendor.iobase +
+		     TPM_INT_ENABLE(chip->vendor.locality));
+
+	intmask |= TPM_INTF_CMD_READY_INT
+	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+	iowrite32(intmask,
+		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
+}
+
+
 #ifdef CONFIG_PNP
 static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 				      const struct pnp_device_id *pnp_id)
@@ -650,6 +785,9 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 	struct tpm_chip *chip = pnp_get_drvdata(dev);
 	int ret;
 
+	if (chip->vendor.irq)
+		tpm_tis_reenable_interrupts(chip);
+
 	ret = tpm_pm_resume(&dev->dev);
 	if (!ret)
 		tpm_continue_selftest(chip);
@@ -702,6 +840,11 @@ static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
 
 static int tpm_tis_resume(struct platform_device *dev)
 {
+	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+
+	if (chip->vendor.irq)
+		tpm_tis_reenable_interrupts(chip);
+
 	return tpm_pm_resume(&dev->dev);
 }
 static struct platform_driver tis_drv = {