author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/char
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig | 52
-rw-r--r--  drivers/char/Makefile | 4
-rw-r--r--  drivers/char/agp/ali-agp.c | 7
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 8
-rw-r--r--  drivers/char/agp/amd64-agp.c | 19
-rw-r--r--  drivers/char/agp/ati-agp.c | 7
-rw-r--r--  drivers/char/agp/backend.c | 15
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 6
-rw-r--r--  drivers/char/agp/generic.c | 12
-rw-r--r--  drivers/char/agp/hp-agp.c | 6
-rw-r--r--  drivers/char/agp/i460-agp.c | 8
-rw-r--r--  drivers/char/agp/intel-agp.c | 21
-rw-r--r--  drivers/char/agp/intel-agp.h | 52
-rw-r--r--  drivers/char/agp/intel-gtt.c | 351
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 6
-rw-r--r--  drivers/char/agp/sgi-agp.c | 10
-rw-r--r--  drivers/char/agp/sis-agp.c | 13
-rw-r--r--  drivers/char/agp/sworks-agp.c | 6
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 8
-rw-r--r--  drivers/char/agp/via-agp.c | 7
-rw-r--r--  drivers/char/apm-emulation.c | 22
-rw-r--r--  drivers/char/bsr.c | 6
-rw-r--r--  drivers/char/ds1302.c | 1
-rw-r--r--  drivers/char/ds1620.c | 8
-rw-r--r--  drivers/char/efirtc.c | 1
-rw-r--r--  drivers/char/genrtc.c | 1
-rw-r--r--  drivers/char/hpet.c | 10
-rw-r--r--  drivers/char/hw_random/Kconfig | 88
-rw-r--r--  drivers/char/hw_random/Makefile | 5
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 155
-rw-r--r--  drivers/char/hw_random/bcm63xx-rng.c | 175
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 182
-rw-r--r--  drivers/char/hw_random/ixp4xx-rng.c | 5
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c | 127
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 23
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 15
-rw-r--r--  drivers/char/hw_random/octeon-rng.c | 32
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 146
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c | 16
-rw-r--r--  drivers/char/hw_random/picoxcell-rng.c | 16
-rw-r--r--  drivers/char/hw_random/ppc4xx-rng.c | 16
-rw-r--r--  drivers/char/hw_random/pseries-rng.c | 96
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 19
-rw-r--r--  drivers/char/hw_random/tpm-rng.c | 50
-rw-r--r--  drivers/char/hw_random/tx4939-rng.c | 5
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 42
-rw-r--r--  drivers/char/i8k.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 247
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 165
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 110
-rw-r--r--  drivers/char/lp.c | 8
-rw-r--r--  drivers/char/mbcs.c | 5
-rw-r--r--  drivers/char/mem.c | 87
-rw-r--r--  drivers/char/misc.c | 2
-rw-r--r--  drivers/char/mmtimer.c | 3
-rw-r--r--  drivers/char/mspec.c | 5
-rw-r--r--  drivers/char/mwave/3780i.c | 1
-rw-r--r--  drivers/char/mwave/mwavedd.c | 16
-rw-r--r--  drivers/char/nvram.c | 3
-rw-r--r--  drivers/char/nwbutton.c | 4
-rw-r--r--  drivers/char/nwflash.c | 41
-rw-r--r--  drivers/char/pc8736x_gpio.c | 3
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 160
-rw-r--r--  drivers/char/ppdev.c | 9
-rw-r--r--  drivers/char/ps3flash.c | 3
-rw-r--r--  drivers/char/random.c | 419
-rw-r--r--  drivers/char/raw.c | 5
-rw-r--r--  drivers/char/rtc.c | 14
-rw-r--r--  drivers/char/sonypi.c | 32
-rw-r--r--  drivers/char/tb0219.c | 6
-rw-r--r--  drivers/char/tile-srom.c | 14
-rw-r--r--  drivers/char/tlclk.c | 6
-rw-r--r--  drivers/char/tpm/Kconfig | 22
-rw-r--r--  drivers/char/tpm/Makefile | 8
-rw-r--r--  drivers/char/tpm/tpm.c | 275
-rw-r--r--  drivers/char/tpm/tpm.h | 50
-rw-r--r--  drivers/char/tpm/tpm_acpi.c | 109
-rw-r--r--  drivers/char/tpm/tpm_atmel.c | 12
-rw-r--r--  drivers/char/tpm/tpm_eventlog.c | 419
-rw-r--r--  drivers/char/tpm/tpm_eventlog.h | 86
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 695
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 724
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.h | 76
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 12
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 13
-rw-r--r--  drivers/char/tpm/tpm_of.c | 73
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 463
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 140
-rw-r--r--  drivers/char/ttyprintk.c | 40
-rw-r--r--  drivers/char/virtio_console.c | 653
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 45
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h | 2
95 files changed, 1662 insertions(+), 5518 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72bedad6bf8..b2aec046996 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@ menu "Character devices"
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+	bool "Memory device driver"
+	default y
+	help
+	  The memory driver provides two character devices, mem and kmem, which
+	  provide access to the system's memory. The mem device is a view of
+	  physical memory, and each byte in the device corresponds to the
+	  matching physical address. The kmem device is the same as mem, but
+	  the addresses correspond to the kernel's virtual address space rather
+	  than physical memory. These devices are standard parts of a Linux
+	  system and most users should say Y here. You might say N if very
+	  security conscience or memory is tight.
+
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
 	default y
@@ -66,6 +79,21 @@ config TTY_PRINTK
 
 	  If unsure, say N.
 
+config BRIQ_PANEL
+	tristate 'Total Impact briQ front panel driver'
+	depends on PPC_CHRP
+	---help---
+	  The briQ is a small footprint CHRP computer with a frontpanel VFD, a
+	  tristate led and two switches. It is the size of a CDROM drive.
+
+	  If you have such one and want anything showing on the VFD then you
+	  must answer Y here.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called briq_panel.
+
+	  It's safe to say N here.
+
 config BFIN_OTP
 	tristate "Blackfin On-Chip OTP Memory Support"
 	depends on BLACKFIN && (BF51x || BF52x || BF54x)
@@ -283,7 +311,7 @@ if RTC_LIB=n
 config RTC
 	tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
 	depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
-		&& !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN && !UML
+		&& !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -331,7 +359,7 @@ config JS_RTC
 
 config GEN_RTC
 	tristate "Generic /dev/rtc emulation"
-	depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+	depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -418,8 +446,8 @@ config APPLICOM
 	  If unsure, say N.
 
 config SONYPI
-	tristate "Sony Vaio Programmable I/O Control Device support"
-	depends on X86 && PCI && INPUT && !64BIT
+	tristate "Sony Vaio Programmable I/O Control Device support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && X86 && PCI && INPUT && !64BIT
 	---help---
 	  This driver enables access to the Sony Programmable I/O Control
 	  Device which can be found in many (all ?) Sony Vaio laptops.
@@ -475,7 +503,7 @@ config SCx200_GPIO
 
 config PC8736x_GPIO
 	tristate "NatSemi PC8736x GPIO Support"
-	depends on X86_32 && !UML
+	depends on X86_32
 	default SCx200_GPIO # mostly N
 	select NSC_GPIO # needed for support routines
 	help
@@ -566,7 +594,7 @@ source "drivers/char/tpm/Kconfig"
 
 config TELCLOCK
 	tristate "Telecom clock driver for ATCA SBC"
-	depends on X86
+	depends on EXPERIMENTAL && X86
 	default n
 	help
 	  The telecom clock device is specific to the MPCBL0010 and MPCBL0050
@@ -583,8 +611,20 @@ config DEVPORT
 	depends on ISA || PCI
 	default y
 
+config DCC_TTY
+	tristate "DCC tty driver"
+	depends on ARM
+
 source "drivers/s390/char/Kconfig"
 
+config RAMOOPS
+	tristate "Log panic/oops to a RAM buffer"
+	depends on HAS_IOMEM
+	default n
+	help
+	  This enables panic and oops messages to be logged to a circular
+	  buffer in RAM where it can be read back at some later point.
+
 config MSM_SMD_PKT
 	bool "Enable device interface for some SMD packet ports"
 	default n
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7ff1d0d208a..5e2fd709702 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
 obj-$(CONFIG_VIOTAPE) += viotape.o
 obj-$(CONFIG_IBM_BSR) += bsr.o
 obj-$(CONFIG_SGI_MBCS) += mbcs.o
+obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
 obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
 
 obj-$(CONFIG_PRINTER) += lp.o
@@ -52,11 +53,14 @@ obj-$(CONFIG_TELCLOCK) += tlclk.o
 obj-$(CONFIG_MWAVE) += mwave/
 obj-$(CONFIG_AGP) += agp/
 obj-$(CONFIG_PCMCIA) += pcmcia/
+obj-$(CONFIG_IPMI_HANDLER) += ipmi/
 
 obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM) += tpm/
 
+obj-$(CONFIG_DCC_TTY) += dcc_tty.o
 obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+obj-$(CONFIG_RAMOOPS) += ramoops.o
 
 obj-$(CONFIG_JS_RTC) += js-rtc.o
 js-rtc-y = rtc.o
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 443cd6751ca..fd793519ea2 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -249,7 +249,7 @@ static const struct agp_bridge_driver ali_m1541_bridge = {
249}; 249};
250 250
251 251
252static struct agp_device_ids ali_agp_device_ids[] = 252static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
253{ 253{
254 { 254 {
255 .device_id = PCI_DEVICE_ID_AL_M1541, 255 .device_id = PCI_DEVICE_ID_AL_M1541,
@@ -299,7 +299,8 @@ static struct agp_device_ids ali_agp_device_ids[] =
299 { }, /* dummy final entry, always present */ 299 { }, /* dummy final entry, always present */
300}; 300};
301 301
302static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 302static int __devinit agp_ali_probe(struct pci_dev *pdev,
303 const struct pci_device_id *ent)
303{ 304{
304 struct agp_device_ids *devs = ali_agp_device_ids; 305 struct agp_device_ids *devs = ali_agp_device_ids;
305 struct agp_bridge_data *bridge; 306 struct agp_bridge_data *bridge;
@@ -373,7 +374,7 @@ found:
373 return agp_add_bridge(bridge); 374 return agp_add_bridge(bridge);
374} 375}
375 376
376static void agp_ali_remove(struct pci_dev *pdev) 377static void __devexit agp_ali_remove(struct pci_dev *pdev)
377{ 378{
378 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 379 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
379 380
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 779f0ab845a..f7e88787af9 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -388,7 +388,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
388 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 388 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
389}; 389};
390 390
391static struct agp_device_ids amd_agp_device_ids[] = 391static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
392{ 392{
393 { 393 {
394 .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006, 394 .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
@@ -405,8 +405,8 @@ static struct agp_device_ids amd_agp_device_ids[] =
405 { }, /* dummy final entry, always present */ 405 { }, /* dummy final entry, always present */
406}; 406};
407 407
408static int agp_amdk7_probe(struct pci_dev *pdev, 408static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
409 const struct pci_device_id *ent) 409 const struct pci_device_id *ent)
410{ 410{
411 struct agp_bridge_data *bridge; 411 struct agp_bridge_data *bridge;
412 u8 cap_ptr; 412 u8 cap_ptr;
@@ -480,7 +480,7 @@ static int agp_amdk7_probe(struct pci_dev *pdev,
480 return agp_add_bridge(bridge); 480 return agp_add_bridge(bridge);
481} 481}
482 482
483static void agp_amdk7_remove(struct pci_dev *pdev) 483static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
484{ 484{
485 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 485 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
486 486
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index d79d692d05b..780498d7658 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -33,7 +33,7 @@
33#define ULI_X86_64_ENU_SCR_REG 0x54 33#define ULI_X86_64_ENU_SCR_REG 0x54
34 34
35static struct resource *aperture_resource; 35static struct resource *aperture_resource;
36static bool __initdata agp_try_unsupported = 1; 36static int __initdata agp_try_unsupported = 1;
37static int agp_bridges_found; 37static int agp_bridges_found;
38 38
39static void amd64_tlbflush(struct agp_memory *temp) 39static void amd64_tlbflush(struct agp_memory *temp)
@@ -240,7 +240,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
240}; 240};
241 241
242/* Some basic sanity checks for the aperture. */ 242/* Some basic sanity checks for the aperture. */
243static int agp_aperture_valid(u64 aper, u32 size) 243static int __devinit agp_aperture_valid(u64 aper, u32 size)
244{ 244{
245 if (!aperture_valid(aper, size, 32*1024*1024)) 245 if (!aperture_valid(aper, size, 32*1024*1024))
246 return 0; 246 return 0;
@@ -267,7 +267,8 @@ static int agp_aperture_valid(u64 aper, u32 size)
267 * to allocate that much memory. But at least error out cleanly instead of 267 * to allocate that much memory. But at least error out cleanly instead of
268 * crashing. 268 * crashing.
269 */ 269 */
270static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) 270static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
271 u16 cap)
271{ 272{
272 u32 aper_low, aper_hi; 273 u32 aper_low, aper_hi;
273 u64 aper, nb_aper; 274 u64 aper, nb_aper;
@@ -325,7 +326,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
325 return 0; 326 return 0;
326} 327}
327 328
328static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr) 329static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
329{ 330{
330 int i; 331 int i;
331 332
@@ -351,7 +352,7 @@ static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
351} 352}
352 353
353/* Handle AMD 8151 quirks */ 354/* Handle AMD 8151 quirks */
354static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge) 355static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
355{ 356{
356 char *revstring; 357 char *revstring;
357 358
@@ -389,7 +390,7 @@ static const struct aper_size_info_32 uli_sizes[7] =
389 {8, 2048, 1, 4}, 390 {8, 2048, 1, 4},
390 {4, 1024, 0, 3} 391 {4, 1024, 0, 3}
391}; 392};
392static int uli_agp_init(struct pci_dev *pdev) 393static int __devinit uli_agp_init(struct pci_dev *pdev)
393{ 394{
394 u32 httfea,baseaddr,enuscr; 395 u32 httfea,baseaddr,enuscr;
395 struct pci_dev *dev1; 396 struct pci_dev *dev1;
@@ -512,8 +513,8 @@ put:
512 return ret; 513 return ret;
513} 514}
514 515
515static int agp_amd64_probe(struct pci_dev *pdev, 516static int __devinit agp_amd64_probe(struct pci_dev *pdev,
516 const struct pci_device_id *ent) 517 const struct pci_device_id *ent)
517{ 518{
518 struct agp_bridge_data *bridge; 519 struct agp_bridge_data *bridge;
519 u8 cap_ptr; 520 u8 cap_ptr;
@@ -578,7 +579,7 @@ static int agp_amd64_probe(struct pci_dev *pdev,
578 return 0; 579 return 0;
579} 580}
580 581
581static void agp_amd64_remove(struct pci_dev *pdev) 582static void __devexit agp_amd64_remove(struct pci_dev *pdev)
582{ 583{
583 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 584 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
584 585
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 0628d7b65c7..dc30e224349 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -445,7 +445,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
445}; 445};
446 446
447 447
448static struct agp_device_ids ati_agp_device_ids[] = 448static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
449{ 449{
450 { 450 {
451 .device_id = PCI_DEVICE_ID_ATI_RS100, 451 .device_id = PCI_DEVICE_ID_ATI_RS100,
@@ -490,7 +490,8 @@ static struct agp_device_ids ati_agp_device_ids[] =
490 { }, /* dummy final entry, always present */ 490 { }, /* dummy final entry, always present */
491}; 491};
492 492
493static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 493static int __devinit agp_ati_probe(struct pci_dev *pdev,
494 const struct pci_device_id *ent)
494{ 495{
495 struct agp_device_ids *devs = ati_agp_device_ids; 496 struct agp_device_ids *devs = ati_agp_device_ids;
496 struct agp_bridge_data *bridge; 497 struct agp_bridge_data *bridge;
@@ -532,7 +533,7 @@ found:
532 return agp_add_bridge(bridge); 533 return agp_add_bridge(bridge);
533} 534}
534 535
535static void agp_ati_remove(struct pci_dev *pdev) 536static void __devexit agp_ati_remove(struct pci_dev *pdev)
536{ 537{
537 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 538 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
538 539
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28ce832..f27d0d0816d 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -171,7 +171,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
171 } 171 }
172 got_gatt = 1; 172 got_gatt = 1;
173 173
174 bridge->key_list = vzalloc(PAGE_SIZE * 4); 174 bridge->key_list = vmalloc(PAGE_SIZE * 4);
175 if (bridge->key_list == NULL) { 175 if (bridge->key_list == NULL) {
176 dev_err(&bridge->dev->dev, 176 dev_err(&bridge->dev->dev,
177 "can't allocate memory for key lists\n"); 177 "can't allocate memory for key lists\n");
@@ -181,6 +181,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
181 got_keylist = 1; 181 got_keylist = 1;
182 182
183 /* FIXME vmalloc'd memory not guaranteed contiguous */ 183 /* FIXME vmalloc'd memory not guaranteed contiguous */
184 memset(bridge->key_list, 0, PAGE_SIZE * 4);
184 185
185 if (bridge->driver->configure()) { 186 if (bridge->driver->configure()) {
186 dev_err(&bridge->dev->dev, "error configuring host chipset\n"); 187 dev_err(&bridge->dev->dev, "error configuring host chipset\n");
@@ -194,10 +195,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
194 195
195err_out: 196err_out:
196 if (bridge->driver->needs_scratch_page) { 197 if (bridge->driver->needs_scratch_page) {
197 struct page *page = bridge->scratch_page_page; 198 void *va = page_address(bridge->scratch_page_page);
198 199
199 bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); 200 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
200 bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); 201 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
201 } 202 }
202 if (got_gatt) 203 if (got_gatt)
203 bridge->driver->free_gatt_table(bridge); 204 bridge->driver->free_gatt_table(bridge);
@@ -221,10 +222,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
221 222
222 if (bridge->driver->agp_destroy_page && 223 if (bridge->driver->agp_destroy_page &&
223 bridge->driver->needs_scratch_page) { 224 bridge->driver->needs_scratch_page) {
224 struct page *page = bridge->scratch_page_page; 225 void *va = page_address(bridge->scratch_page_page);
225 226
226 bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); 227 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
227 bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); 228 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
228 } 229 }
229} 230}
230 231
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 6974d503205..d607f53d8af 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -343,8 +343,8 @@ static const struct agp_bridge_driver efficeon_driver = {
343 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 343 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
344}; 344};
345 345
346static int agp_efficeon_probe(struct pci_dev *pdev, 346static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
347 const struct pci_device_id *ent) 347 const struct pci_device_id *ent)
348{ 348{
349 struct agp_bridge_data *bridge; 349 struct agp_bridge_data *bridge;
350 u8 cap_ptr; 350 u8 cap_ptr;
@@ -407,7 +407,7 @@ static int agp_efficeon_probe(struct pci_dev *pdev,
407 return agp_add_bridge(bridge); 407 return agp_add_bridge(bridge);
408} 408}
409 409
410static void agp_efficeon_remove(struct pci_dev *pdev) 410static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
411{ 411{
412 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 412 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
413 413
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index a0df182f6f7..b072648dc3f 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
514 switch (*bridge_agpstat & 7) { 514 switch (*bridge_agpstat & 7) {
515 case 4: 515 case 4:
516 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X); 516 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
517 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. " 517 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
518 "Fixing up support for x2 & x1\n"); 518 "Fixing up support for x2 & x1\n");
519 break; 519 break;
520 case 2: 520 case 2:
521 *bridge_agpstat |= AGPSTAT2_1X; 521 *bridge_agpstat |= AGPSTAT2_1X;
522 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. " 522 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
523 "Fixing up support for x1\n"); 523 "Fixing up support for x1\n");
524 break; 524 break;
525 default: 525 default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
693 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); 693 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
694 *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); 694 *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
695 } else { 695 } else {
696 printk(KERN_INFO PFX "Fell back to AGPx4 mode because "); 696 printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
697 if (!(*bridge_agpstat & AGPSTAT3_8X)) { 697 if (!(*bridge_agpstat & AGPSTAT3_8X)) {
698 printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", 698 printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
699 *bridge_agpstat, origbridge); 699 *bridge_agpstat, origbridge);
@@ -956,9 +956,9 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
956 bridge->driver->cache_flush(); 956 bridge->driver->cache_flush();
957#ifdef CONFIG_X86 957#ifdef CONFIG_X86
958 if (set_memory_uc((unsigned long)table, 1 << page_order)) 958 if (set_memory_uc((unsigned long)table, 1 << page_order))
959 printk(KERN_WARNING "Could not set GATT table memory to UC!\n"); 959 printk(KERN_WARNING "Could not set GATT table memory to UC!");
960 960
961 bridge->gatt_table = (u32 __iomem *)table; 961 bridge->gatt_table = (void *)table;
962#else 962#else
963 bridge->gatt_table = ioremap_nocache(virt_to_phys(table), 963 bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
964 (PAGE_SIZE * (1 << page_order))); 964 (PAGE_SIZE * (1 << page_order)));
@@ -1010,6 +1010,7 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
1010 case LVL2_APER_SIZE: 1010 case LVL2_APER_SIZE:
1011 /* The generic routines can't deal with 2 level gatt's */ 1011 /* The generic routines can't deal with 2 level gatt's */
1012 return -EINVAL; 1012 return -EINVAL;
1013 break;
1013 default: 1014 default:
1014 page_order = 0; 1015 page_order = 0;
1015 break; 1016 break;
@@ -1076,6 +1077,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
1076 case LVL2_APER_SIZE: 1077 case LVL2_APER_SIZE:
1077 /* The generic routines can't deal with 2 level gatt's */ 1078 /* The generic routines can't deal with 2 level gatt's */
1078 return -EINVAL; 1079 return -EINVAL;
1080 break;
1079 default: 1081 default:
1080 num_entries = 0; 1082 num_entries = 0;
1081 break; 1083 break;
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 3695773ce7c..056b289a1e8 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -336,8 +336,7 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
336 off_t j, io_pg_start; 336 off_t j, io_pg_start;
337 int io_pg_count; 337 int io_pg_count;
338 338
339 if (type != mem->type || 339 if (type != 0 || mem->type != 0) {
340 agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
341 return -EINVAL; 340 return -EINVAL;
342 } 341 }
343 342
@@ -381,8 +380,7 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
381 struct _hp_private *hp = &hp_private; 380 struct _hp_private *hp = &hp_private;
382 int i, io_pg_start, io_pg_count; 381 int i, io_pg_start, io_pg_count;
383 382
384 if (type != mem->type || 383 if (type != 0 || mem->type != 0) {
385 agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
386 return -EINVAL; 384 return -EINVAL;
387 } 385 }
388 386
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 15b240ea484..75b763cb3ea 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -587,8 +587,8 @@ const struct agp_bridge_driver intel_i460_driver = {
587 .cant_use_aperture = true, 587 .cant_use_aperture = true,
588}; 588};
589 589
590static int agp_intel_i460_probe(struct pci_dev *pdev, 590static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
591 const struct pci_device_id *ent) 591 const struct pci_device_id *ent)
592{ 592{
593 struct agp_bridge_data *bridge; 593 struct agp_bridge_data *bridge;
594 u8 cap_ptr; 594 u8 cap_ptr;
@@ -611,7 +611,7 @@ static int agp_intel_i460_probe(struct pci_dev *pdev,
611 return agp_add_bridge(bridge); 611 return agp_add_bridge(bridge);
612} 612}
613 613
614static void agp_intel_i460_remove(struct pci_dev *pdev) 614static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
615{ 615{
616 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 616 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
617 617
@@ -637,7 +637,7 @@ static struct pci_driver agp_intel_i460_pci_driver = {
637 .name = "agpgart-intel-i460", 637 .name = "agpgart-intel-i460",
638 .id_table = agp_intel_i460_pci_table, 638 .id_table = agp_intel_i460_pci_table,
639 .probe = agp_intel_i460_probe, 639 .probe = agp_intel_i460_probe,
640 .remove = agp_intel_i460_remove, 640 .remove = __devexit_p(agp_intel_i460_remove),
641}; 641};
642 642
643static int __init agp_intel_i460_init(void) 643static int __init agp_intel_i460_init(void)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a426ee1f57a..b427711be4b 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,7 +12,6 @@
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include "agp.h" 13#include "agp.h"
14#include "intel-agp.h" 14#include "intel-agp.h"
15#include <drm/intel-gtt.h>
16 15
17int intel_agp_enabled; 16int intel_agp_enabled;
18EXPORT_SYMBOL(intel_agp_enabled); 17EXPORT_SYMBOL(intel_agp_enabled);
@@ -732,8 +731,8 @@ static const struct intel_agp_driver_description {
732 { 0, NULL, NULL } 731 { 0, NULL, NULL }
733}; 732};
734 733
735static int agp_intel_probe(struct pci_dev *pdev, 734static int __devinit agp_intel_probe(struct pci_dev *pdev,
736 const struct pci_device_id *ent) 735 const struct pci_device_id *ent)
737{ 736{
738 struct agp_bridge_data *bridge; 737 struct agp_bridge_data *bridge;
739 u8 cap_ptr = 0; 738 u8 cap_ptr = 0;
@@ -748,7 +747,7 @@ static int agp_intel_probe(struct pci_dev *pdev,
748 747
749 bridge->capndx = cap_ptr; 748 bridge->capndx = cap_ptr;
750 749
751 if (intel_gmch_probe(pdev, NULL, bridge)) 750 if (intel_gmch_probe(pdev, bridge))
752 goto found_gmch; 751 goto found_gmch;
753 752
754 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { 753 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -819,13 +818,13 @@ found_gmch:
819 return err; 818 return err;
820} 819}
821 820
822static void agp_intel_remove(struct pci_dev *pdev) 821static void __devexit agp_intel_remove(struct pci_dev *pdev)
823{ 822{
824 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 823 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
825 824
826 agp_remove_bridge(bridge); 825 agp_remove_bridge(bridge);
827 826
828 intel_gmch_remove(); 827 intel_gmch_remove(pdev);
829 828
830 agp_put_bridge(bridge); 829 agp_put_bridge(bridge);
831} 830}
@@ -851,7 +850,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
851 .subvendor = PCI_ANY_ID, \ 850 .subvendor = PCI_ANY_ID, \
852 .subdevice = PCI_ANY_ID, \ 851 .subdevice = PCI_ANY_ID, \
853 } 852 }
854 ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
855 ID(PCI_DEVICE_ID_INTEL_82443LX_0), 853 ID(PCI_DEVICE_ID_INTEL_82443LX_0),
856 ID(PCI_DEVICE_ID_INTEL_82443BX_0), 854 ID(PCI_DEVICE_ID_INTEL_82443BX_0),
857 ID(PCI_DEVICE_ID_INTEL_82443GX_0), 855 ID(PCI_DEVICE_ID_INTEL_82443GX_0),
@@ -899,10 +897,15 @@ static struct pci_device_id agp_intel_pci_table[] = {
899 ID(PCI_DEVICE_ID_INTEL_B43_HB), 897 ID(PCI_DEVICE_ID_INTEL_B43_HB),
900 ID(PCI_DEVICE_ID_INTEL_B43_1_HB), 898 ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
901 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), 899 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), 900 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), 901 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
905 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
903 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
904 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
905 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
906 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
907 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
908 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
906 { } 909 { }
907}; 910};
908 911
@@ -912,7 +915,7 @@ static struct pci_driver agp_intel_pci_driver = {
912 .name = "agpgart-intel", 915 .name = "agpgart-intel",
913 .id_table = agp_intel_pci_table, 916 .id_table = agp_intel_pci_table,
914 .probe = agp_intel_probe, 917 .probe = agp_intel_probe,
915 .remove = agp_intel_remove, 918 .remove = __devexit_p(agp_intel_remove),
916#ifdef CONFIG_PM 919#ifdef CONFIG_PM
917 .resume = agp_intel_resume, 920 .resume = agp_intel_resume,
918#endif 921#endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 1042c1b9037..5da67f165af 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,6 +62,11 @@
62#define I810_PTE_LOCAL 0x00000002 62#define I810_PTE_LOCAL 0x00000002
63#define I810_PTE_VALID 0x00000001 63#define I810_PTE_VALID 0x00000001
64#define I830_PTE_SYSTEM_CACHED 0x00000006 64#define I830_PTE_SYSTEM_CACHED 0x00000006
65/* GT PTE cache control fields */
66#define GEN6_PTE_UNCACHED 0x00000002
67#define GEN6_PTE_LLC 0x00000004
68#define GEN6_PTE_LLC_MLC 0x00000006
69#define GEN6_PTE_GFDT 0x00000008
65 70
66#define I810_SMRAM_MISCC 0x70 71#define I810_SMRAM_MISCC 0x70
67#define I810_GFX_MEM_WIN_SIZE 0x00010000 72#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -141,6 +146,29 @@
141#define INTEL_I7505_AGPCTRL 0x70 146#define INTEL_I7505_AGPCTRL 0x70
142#define INTEL_I7505_MCHCFG 0x50 147#define INTEL_I7505_MCHCFG 0x50
143 148
149#define SNB_GMCH_CTRL 0x50
150#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
151#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
152#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
153#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
154#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
155#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
156#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
157#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
158#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
159#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
160#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
161#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
162#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
163#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
164#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
165#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
166#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
167#define SNB_GTT_SIZE_0M (0 << 8)
168#define SNB_GTT_SIZE_1M (1 << 8)
169#define SNB_GTT_SIZE_2M (2 << 8)
170#define SNB_GTT_SIZE_MASK (3 << 8)
171
144/* pci devices ids */ 172/* pci devices ids */
145#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 173#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
146#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a 174#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -183,11 +211,31 @@
183#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 211#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
184#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 212#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
185#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 213#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
186#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
187#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 214#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
188#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 215#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
189#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 216#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
190#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a 217#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
191#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 218#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
192 219#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
220#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
221#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
222#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
223#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
224#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
225#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
226#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
227#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
228#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
229#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
230#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
231#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
232#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
233#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
234#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
235#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
236#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
237
238int intel_gmch_probe(struct pci_dev *pdev,
239 struct agp_bridge_data *bridge);
240void intel_gmch_remove(struct pci_dev *pdev);
193#endif 241#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea..2774ac1086d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -66,6 +66,7 @@ static struct _intel_private {
66 struct pci_dev *bridge_dev; 66 struct pci_dev *bridge_dev;
67 u8 __iomem *registers; 67 u8 __iomem *registers;
68 phys_addr_t gtt_bus_addr; 68 phys_addr_t gtt_bus_addr;
69 phys_addr_t gma_bus_addr;
69 u32 PGETBL_save; 70 u32 PGETBL_save;
70 u32 __iomem *gtt; /* I915G */ 71 u32 __iomem *gtt; /* I915G */
71 bool clear_fake_agp; /* on first access via agp, fill with scratch */ 72 bool clear_fake_agp; /* on first access via agp, fill with scratch */
@@ -75,7 +76,7 @@ static struct _intel_private {
75 struct resource ifp_resource; 76 struct resource ifp_resource;
76 int resource_valid; 77 int resource_valid;
77 struct page *scratch_page; 78 struct page *scratch_page;
78 int refcount; 79 dma_addr_t scratch_page_dma;
79} intel_private; 80} intel_private;
80 81
81#define INTEL_GTT_GEN intel_private.driver->gen 82#define INTEL_GTT_GEN intel_private.driver->gen
@@ -84,33 +85,40 @@ static struct _intel_private {
84#define IS_IRONLAKE intel_private.driver->is_ironlake 85#define IS_IRONLAKE intel_private.driver->is_ironlake
85#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable 86#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
86 87
87static int intel_gtt_map_memory(struct page **pages, 88int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
88 unsigned int num_entries, 89 struct scatterlist **sg_list, int *num_sg)
89 struct sg_table *st)
90{ 90{
91 struct sg_table st;
91 struct scatterlist *sg; 92 struct scatterlist *sg;
92 int i; 93 int i;
93 94
95 if (*sg_list)
96 return 0; /* already mapped (for e.g. resume */
97
94 DBG("try mapping %lu pages\n", (unsigned long)num_entries); 98 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
95 99
96 if (sg_alloc_table(st, num_entries, GFP_KERNEL)) 100 if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
97 goto err; 101 goto err;
98 102
99 for_each_sg(st->sgl, sg, num_entries, i) 103 *sg_list = sg = st.sgl;
104
105 for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
100 sg_set_page(sg, pages[i], PAGE_SIZE, 0); 106 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
101 107
102 if (!pci_map_sg(intel_private.pcidev, 108 *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
103 st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL)) 109 num_entries, PCI_DMA_BIDIRECTIONAL);
110 if (unlikely(!*num_sg))
104 goto err; 111 goto err;
105 112
106 return 0; 113 return 0;
107 114
108err: 115err:
109 sg_free_table(st); 116 sg_free_table(&st);
110 return -ENOMEM; 117 return -ENOMEM;
111} 118}
119EXPORT_SYMBOL(intel_gtt_map_memory);
112 120
113static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) 121void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
114{ 122{
115 struct sg_table st; 123 struct sg_table st;
116 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); 124 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -123,6 +131,7 @@ static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
123 131
124 sg_free_table(&st); 132 sg_free_table(&st);
125} 133}
134EXPORT_SYMBOL(intel_gtt_unmap_memory);
126 135
127static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) 136static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
128{ 137{
@@ -297,9 +306,9 @@ static int intel_gtt_setup_scratch_page(void)
297 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) 306 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
298 return -EINVAL; 307 return -EINVAL;
299 308
300 intel_private.base.scratch_page_dma = dma_addr; 309 intel_private.scratch_page_dma = dma_addr;
301 } else 310 } else
302 intel_private.base.scratch_page_dma = page_to_phys(page); 311 intel_private.scratch_page_dma = page_to_phys(page);
303 312
304 intel_private.scratch_page = page; 313 intel_private.scratch_page = page;
305 314
@@ -367,6 +376,62 @@ static unsigned int intel_gtt_stolen_size(void)
367 stolen_size = 0; 376 stolen_size = 0;
368 break; 377 break;
369 } 378 }
379 } else if (INTEL_GTT_GEN == 6) {
380 /*
381 * SandyBridge has new memory control reg at 0x50.w
382 */
383 u16 snb_gmch_ctl;
384 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
385 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
386 case SNB_GMCH_GMS_STOLEN_32M:
387 stolen_size = MB(32);
388 break;
389 case SNB_GMCH_GMS_STOLEN_64M:
390 stolen_size = MB(64);
391 break;
392 case SNB_GMCH_GMS_STOLEN_96M:
393 stolen_size = MB(96);
394 break;
395 case SNB_GMCH_GMS_STOLEN_128M:
396 stolen_size = MB(128);
397 break;
398 case SNB_GMCH_GMS_STOLEN_160M:
399 stolen_size = MB(160);
400 break;
401 case SNB_GMCH_GMS_STOLEN_192M:
402 stolen_size = MB(192);
403 break;
404 case SNB_GMCH_GMS_STOLEN_224M:
405 stolen_size = MB(224);
406 break;
407 case SNB_GMCH_GMS_STOLEN_256M:
408 stolen_size = MB(256);
409 break;
410 case SNB_GMCH_GMS_STOLEN_288M:
411 stolen_size = MB(288);
412 break;
413 case SNB_GMCH_GMS_STOLEN_320M:
414 stolen_size = MB(320);
415 break;
416 case SNB_GMCH_GMS_STOLEN_352M:
417 stolen_size = MB(352);
418 break;
419 case SNB_GMCH_GMS_STOLEN_384M:
420 stolen_size = MB(384);
421 break;
422 case SNB_GMCH_GMS_STOLEN_416M:
423 stolen_size = MB(416);
424 break;
425 case SNB_GMCH_GMS_STOLEN_448M:
426 stolen_size = MB(448);
427 break;
428 case SNB_GMCH_GMS_STOLEN_480M:
429 stolen_size = MB(480);
430 break;
431 case SNB_GMCH_GMS_STOLEN_512M:
432 stolen_size = MB(512);
433 break;
434 }
370 } else { 435 } else {
371 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 436 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
372 case I855_GMCH_GMS_STOLEN_1M: 437 case I855_GMCH_GMS_STOLEN_1M:
@@ -500,9 +565,29 @@ static unsigned int i965_gtt_total_entries(void)
500 565
501static unsigned int intel_gtt_total_entries(void) 566static unsigned int intel_gtt_total_entries(void)
502{ 567{
568 int size;
569
503 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) 570 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
504 return i965_gtt_total_entries(); 571 return i965_gtt_total_entries();
505 else { 572 else if (INTEL_GTT_GEN == 6) {
573 u16 snb_gmch_ctl;
574
575 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
576 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
577 default:
578 case SNB_GTT_SIZE_0M:
579 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
580 size = MB(0);
581 break;
582 case SNB_GTT_SIZE_1M:
583 size = MB(1);
584 break;
585 case SNB_GTT_SIZE_2M:
586 size = MB(2);
587 break;
588 }
589 return size/4;
590 } else {
506 /* On previous hardware, the GTT size was just what was 591 /* On previous hardware, the GTT size was just what was
507 * required to map the aperture. 592 * required to map the aperture.
508 */ 593 */
@@ -546,7 +631,7 @@ static unsigned int intel_gtt_mappable_entries(void)
546static void intel_gtt_teardown_scratch_page(void) 631static void intel_gtt_teardown_scratch_page(void)
547{ 632{
548 set_pages_wb(intel_private.scratch_page, 1); 633 set_pages_wb(intel_private.scratch_page, 1);
549 pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma, 634 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
550 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 635 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
551 put_page(intel_private.scratch_page); 636 put_page(intel_private.scratch_page);
552 __free_page(intel_private.scratch_page); 637 __free_page(intel_private.scratch_page);
@@ -564,7 +649,6 @@ static void intel_gtt_cleanup(void)
564 649
565static int intel_gtt_init(void) 650static int intel_gtt_init(void)
566{ 651{
567 u32 gma_addr;
568 u32 gtt_map_size; 652 u32 gtt_map_size;
569 int ret; 653 int ret;
570 654
@@ -590,19 +674,13 @@ static int intel_gtt_init(void)
590 674
591 gtt_map_size = intel_private.base.gtt_total_entries * 4; 675 gtt_map_size = intel_private.base.gtt_total_entries * 4;
592 676
593 intel_private.gtt = NULL; 677 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
594 if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2) 678 gtt_map_size);
595 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, 679 if (!intel_private.gtt) {
596 gtt_map_size);
597 if (intel_private.gtt == NULL)
598 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
599 gtt_map_size);
600 if (intel_private.gtt == NULL) {
601 intel_private.driver->cleanup(); 680 intel_private.driver->cleanup();
602 iounmap(intel_private.registers); 681 iounmap(intel_private.registers);
603 return -ENOMEM; 682 return -ENOMEM;
604 } 683 }
605 intel_private.base.gtt = intel_private.gtt;
606 684
607 global_cache_flush(); /* FIXME: ? */ 685 global_cache_flush(); /* FIXME: ? */
608 686
@@ -616,15 +694,6 @@ static int intel_gtt_init(void)
616 return ret; 694 return ret;
617 } 695 }
618 696
619 if (INTEL_GTT_GEN <= 2)
620 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
621 &gma_addr);
622 else
623 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
624 &gma_addr);
625
626 intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
627
628 return 0; 697 return 0;
629} 698}
630 699
@@ -698,10 +767,23 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
698 writel(addr | pte_flags, intel_private.gtt + entry); 767 writel(addr | pte_flags, intel_private.gtt + entry);
699} 768}
700 769
701bool intel_enable_gtt(void) 770static bool intel_enable_gtt(void)
702{ 771{
772 u32 gma_addr;
703 u8 __iomem *reg; 773 u8 __iomem *reg;
704 774
775 if (INTEL_GTT_GEN <= 2)
776 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
777 &gma_addr);
778 else
779 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
780 &gma_addr);
781
782 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
783
784 if (INTEL_GTT_GEN >= 6)
785 return true;
786
705 if (INTEL_GTT_GEN == 2) { 787 if (INTEL_GTT_GEN == 2) {
706 u16 gmch_ctrl; 788 u16 gmch_ctrl;
707 789
@@ -741,7 +823,6 @@ bool intel_enable_gtt(void)
741 823
742 return true; 824 return true;
743} 825}
744EXPORT_SYMBOL(intel_enable_gtt);
745 826
746static int i830_setup(void) 827static int i830_setup(void)
747{ 828{
@@ -779,7 +860,7 @@ static int intel_fake_agp_configure(void)
779 return -EIO; 860 return -EIO;
780 861
781 intel_private.clear_fake_agp = true; 862 intel_private.clear_fake_agp = true;
782 agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr; 863 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
783 864
784 return 0; 865 return 0;
785} 866}
@@ -797,7 +878,8 @@ static bool i830_check_flags(unsigned int flags)
797 return false; 878 return false;
798} 879}
799 880
800void intel_gtt_insert_sg_entries(struct sg_table *st, 881void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
882 unsigned int sg_len,
801 unsigned int pg_start, 883 unsigned int pg_start,
802 unsigned int flags) 884 unsigned int flags)
803{ 885{
@@ -809,11 +891,12 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
809 891
810 /* sg may merge pages, but we have to separate 892 /* sg may merge pages, but we have to separate
811 * per-page addr for GTT */ 893 * per-page addr for GTT */
812 for_each_sg(st->sgl, sg, st->nents, i) { 894 for_each_sg(sg_list, sg, sg_len, i) {
813 len = sg_dma_len(sg) >> PAGE_SHIFT; 895 len = sg_dma_len(sg) >> PAGE_SHIFT;
814 for (m = 0; m < len; m++) { 896 for (m = 0; m < len; m++) {
815 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 897 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
816 intel_private.driver->write_entry(addr, j, flags); 898 intel_private.driver->write_entry(addr,
899 j, flags);
817 j++; 900 j++;
818 } 901 }
819 } 902 }
@@ -821,10 +904,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
821} 904}
822EXPORT_SYMBOL(intel_gtt_insert_sg_entries); 905EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
823 906
824static void intel_gtt_insert_pages(unsigned int first_entry, 907void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
825 unsigned int num_entries, 908 struct page **pages, unsigned int flags)
826 struct page **pages,
827 unsigned int flags)
828{ 909{
829 int i, j; 910 int i, j;
830 911
@@ -835,15 +916,13 @@ static void intel_gtt_insert_pages(unsigned int first_entry,
835 } 916 }
836 readl(intel_private.gtt+j-1); 917 readl(intel_private.gtt+j-1);
837} 918}
919EXPORT_SYMBOL(intel_gtt_insert_pages);
838 920
839static int intel_fake_agp_insert_entries(struct agp_memory *mem, 921static int intel_fake_agp_insert_entries(struct agp_memory *mem,
840 off_t pg_start, int type) 922 off_t pg_start, int type)
841{ 923{
842 int ret = -EINVAL; 924 int ret = -EINVAL;
843 925
844 if (intel_private.base.do_idle_maps)
845 return -ENODEV;
846
847 if (intel_private.clear_fake_agp) { 926 if (intel_private.clear_fake_agp) {
848 int start = intel_private.base.stolen_size / PAGE_SIZE; 927 int start = intel_private.base.stolen_size / PAGE_SIZE;
849 int end = intel_private.base.gtt_mappable_entries; 928 int end = intel_private.base.gtt_mappable_entries;
@@ -870,15 +949,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
870 global_cache_flush(); 949 global_cache_flush();
871 950
872 if (intel_private.base.needs_dmar) { 951 if (intel_private.base.needs_dmar) {
873 struct sg_table st; 952 ret = intel_gtt_map_memory(mem->pages, mem->page_count,
874 953 &mem->sg_list, &mem->num_sg);
875 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
876 if (ret != 0) 954 if (ret != 0)
877 return ret; 955 return ret;
878 956
879 intel_gtt_insert_sg_entries(&st, pg_start, type); 957 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
880 mem->sg_list = st.sgl; 958 pg_start, type);
881 mem->num_sg = st.nents;
882 } else 959 } else
883 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, 960 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
884 type); 961 type);
@@ -895,7 +972,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
895 unsigned int i; 972 unsigned int i;
896 973
897 for (i = first_entry; i < (first_entry + num_entries); i++) { 974 for (i = first_entry; i < (first_entry + num_entries); i++) {
898 intel_private.driver->write_entry(intel_private.base.scratch_page_dma, 975 intel_private.driver->write_entry(intel_private.scratch_page_dma,
899 i, 0); 976 i, 0);
900 } 977 }
901 readl(intel_private.gtt+i-1); 978 readl(intel_private.gtt+i-1);
@@ -908,9 +985,6 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
908 if (mem->page_count == 0) 985 if (mem->page_count == 0)
909 return 0; 986 return 0;
910 987
911 if (intel_private.base.do_idle_maps)
912 return -ENODEV;
913
914 intel_gtt_clear_range(pg_start, mem->page_count); 988 intel_gtt_clear_range(pg_start, mem->page_count);
915 989
916 if (intel_private.base.needs_dmar) { 990 if (intel_private.base.needs_dmar) {
@@ -1070,54 +1144,72 @@ static void i965_write_entry(dma_addr_t addr,
1070 writel(addr | pte_flags, intel_private.gtt + entry); 1144 writel(addr | pte_flags, intel_private.gtt + entry);
1071} 1145}
1072 1146
1073/* Certain Gen5 chipsets require require idling the GPU before 1147static bool gen6_check_flags(unsigned int flags)
1074 * unmapping anything from the GTT when VT-d is enabled.
1075 */
1076static inline int needs_idle_maps(void)
1077{ 1148{
1078#ifdef CONFIG_INTEL_IOMMU 1149 return true;
1079 const unsigned short gpu_devid = intel_private.pcidev->device; 1150}
1080 1151
1081 /* Query intel_iommu to see if we need the workaround. Presumably that 1152static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1082 * was loaded first. 1153 unsigned int flags)
1083 */ 1154{
1084 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || 1155 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1085 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && 1156 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1086 intel_iommu_gfx_mapped) 1157 u32 pte_flags;
1087 return 1; 1158
1088#endif 1159 if (type_mask == AGP_USER_MEMORY)
1089 return 0; 1160 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1161 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1162 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1163 if (gfdt)
1164 pte_flags |= GEN6_PTE_GFDT;
1165 } else { /* set 'normal'/'cached' to LLC by default */
1166 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1167 if (gfdt)
1168 pte_flags |= GEN6_PTE_GFDT;
1169 }
1170
1171 /* gen6 has bit11-4 for physical addr bit39-32 */
1172 addr |= (addr >> 28) & 0xff0;
1173 writel(addr | pte_flags, intel_private.gtt + entry);
1174}
1175
1176static void gen6_cleanup(void)
1177{
1090} 1178}
1091 1179
1092static int i9xx_setup(void) 1180static int i9xx_setup(void)
1093{ 1181{
1094 u32 reg_addr, gtt_addr; 1182 u32 reg_addr;
1095 int size = KB(512);
1096 1183
1097 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr); 1184 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1098 1185
1099 reg_addr &= 0xfff80000; 1186 reg_addr &= 0xfff80000;
1100 1187
1101 intel_private.registers = ioremap(reg_addr, size); 1188 intel_private.registers = ioremap(reg_addr, 128 * 4096);
1102 if (!intel_private.registers) 1189 if (!intel_private.registers)
1103 return -ENOMEM; 1190 return -ENOMEM;
1104 1191
1105 switch (INTEL_GTT_GEN) { 1192 if (INTEL_GTT_GEN == 3) {
1106 case 3: 1193 u32 gtt_addr;
1194
1107 pci_read_config_dword(intel_private.pcidev, 1195 pci_read_config_dword(intel_private.pcidev,
1108 I915_PTEADDR, &gtt_addr); 1196 I915_PTEADDR, &gtt_addr);
1109 intel_private.gtt_bus_addr = gtt_addr; 1197 intel_private.gtt_bus_addr = gtt_addr;
1110 break; 1198 } else {
1111 case 5: 1199 u32 gtt_offset;
1112 intel_private.gtt_bus_addr = reg_addr + MB(2);
1113 break;
1114 default:
1115 intel_private.gtt_bus_addr = reg_addr + KB(512);
1116 break;
1117 }
1118 1200
1119 if (needs_idle_maps()) 1201 switch (INTEL_GTT_GEN) {
1120 intel_private.base.do_idle_maps = 1; 1202 case 5:
1203 case 6:
1204 gtt_offset = MB(2);
1205 break;
1206 case 4:
1207 default:
1208 gtt_offset = KB(512);
1209 break;
1210 }
1211 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1212 }
1121 1213
1122 intel_i9xx_setup_flush(); 1214 intel_i9xx_setup_flush();
1123 1215
@@ -1225,6 +1317,15 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
1225 .check_flags = i830_check_flags, 1317 .check_flags = i830_check_flags,
1226 .chipset_flush = i9xx_chipset_flush, 1318 .chipset_flush = i9xx_chipset_flush,
1227}; 1319};
1320static const struct intel_gtt_driver sandybridge_gtt_driver = {
1321 .gen = 6,
1322 .setup = i9xx_setup,
1323 .cleanup = gen6_cleanup,
1324 .write_entry = gen6_write_entry,
1325 .dma_mask_size = 40,
1326 .check_flags = gen6_check_flags,
1327 .chipset_flush = i9xx_chipset_flush,
1328};
1228 1329
1229/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of 1330/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
1230 * driver and gmch_driver must be non-null, and find_gmch will determine 1331 * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1305,6 +1406,30 @@ static const struct intel_gtt_driver_description {
1305 "HD Graphics", &ironlake_gtt_driver }, 1406 "HD Graphics", &ironlake_gtt_driver },
1306 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 1407 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1307 "HD Graphics", &ironlake_gtt_driver }, 1408 "HD Graphics", &ironlake_gtt_driver },
1409 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1410 "Sandybridge", &sandybridge_gtt_driver },
1411 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1412 "Sandybridge", &sandybridge_gtt_driver },
1413 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1414 "Sandybridge", &sandybridge_gtt_driver },
1415 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1416 "Sandybridge", &sandybridge_gtt_driver },
1417 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1418 "Sandybridge", &sandybridge_gtt_driver },
1419 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1420 "Sandybridge", &sandybridge_gtt_driver },
1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1422 "Sandybridge", &sandybridge_gtt_driver },
1423 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
1424 "Ivybridge", &sandybridge_gtt_driver },
1425 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
1426 "Ivybridge", &sandybridge_gtt_driver },
1427 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
1428 "Ivybridge", &sandybridge_gtt_driver },
1429 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
1430 "Ivybridge", &sandybridge_gtt_driver },
1431 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
1432 "Ivybridge", &sandybridge_gtt_driver },
1308 { 0, NULL, NULL } 1433 { 0, NULL, NULL }
1309}; 1434};
1310 1435
@@ -1325,32 +1450,14 @@ static int find_gmch(u16 device)
1325 return 1; 1450 return 1;
1326} 1451}
1327 1452
1328int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, 1453int intel_gmch_probe(struct pci_dev *pdev,
1329 struct agp_bridge_data *bridge) 1454 struct agp_bridge_data *bridge)
1330{ 1455{
1331 int i, mask; 1456 int i, mask;
1332 1457 intel_private.driver = NULL;
1333 /*
1334 * Can be called from the fake agp driver but also directly from
1335 * drm/i915.ko. Hence we need to check whether everything is set up
1336 * already.
1337 */
1338 if (intel_private.driver) {
1339 intel_private.refcount++;
1340 return 1;
1341 }
1342 1458
1343 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { 1459 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1344 if (gpu_pdev) { 1460 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1345 if (gpu_pdev->device ==
1346 intel_gtt_chipsets[i].gmch_chip_id) {
1347 intel_private.pcidev = pci_dev_get(gpu_pdev);
1348 intel_private.driver =
1349 intel_gtt_chipsets[i].gtt_driver;
1350
1351 break;
1352 }
1353 } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1354 intel_private.driver = 1461 intel_private.driver =
1355 intel_gtt_chipsets[i].gtt_driver; 1462 intel_gtt_chipsets[i].gtt_driver;
1356 break; 1463 break;
@@ -1360,17 +1467,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1360 if (!intel_private.driver) 1467 if (!intel_private.driver)
1361 return 0; 1468 return 0;
1362 1469
1363 intel_private.refcount++; 1470 bridge->driver = &intel_fake_agp_driver;
1471 bridge->dev_private_data = &intel_private;
1472 bridge->dev = pdev;
1364 1473
1365 if (bridge) { 1474 intel_private.bridge_dev = pci_dev_get(pdev);
1366 bridge->driver = &intel_fake_agp_driver;
1367 bridge->dev_private_data = &intel_private;
1368 bridge->dev = bridge_pdev;
1369 }
1370
1371 intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1372 1475
1373 dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); 1476 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1374 1477
1375 mask = intel_private.driver->dma_mask_size; 1478 mask = intel_private.driver->dma_mask_size;
1376 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) 1479 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
@@ -1380,17 +1483,17 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1380 pci_set_consistent_dma_mask(intel_private.pcidev, 1483 pci_set_consistent_dma_mask(intel_private.pcidev,
1381 DMA_BIT_MASK(mask)); 1484 DMA_BIT_MASK(mask));
1382 1485
1383 if (intel_gtt_init() != 0) { 1486 /*if (bridge->driver == &intel_810_driver)
1384 intel_gmch_remove(); 1487 return 1;*/
1385 1488
1489 if (intel_gtt_init() != 0)
1386 return 0; 1490 return 0;
1387 }
1388 1491
1389 return 1; 1492 return 1;
1390} 1493}
1391EXPORT_SYMBOL(intel_gmch_probe); 1494EXPORT_SYMBOL(intel_gmch_probe);
1392 1495
1393struct intel_gtt *intel_gtt_get(void) 1496const struct intel_gtt *intel_gtt_get(void)
1394{ 1497{
1395 return &intel_private.base; 1498 return &intel_private.base;
1396} 1499}
@@ -1403,16 +1506,12 @@ void intel_gtt_chipset_flush(void)
1403} 1506}
1404EXPORT_SYMBOL(intel_gtt_chipset_flush); 1507EXPORT_SYMBOL(intel_gtt_chipset_flush);
1405 1508
1406void intel_gmch_remove(void) 1509void intel_gmch_remove(struct pci_dev *pdev)
1407{ 1510{
1408 if (--intel_private.refcount)
1409 return;
1410
1411 if (intel_private.pcidev) 1511 if (intel_private.pcidev)
1412 pci_dev_put(intel_private.pcidev); 1512 pci_dev_put(intel_private.pcidev);
1413 if (intel_private.bridge_dev) 1513 if (intel_private.bridge_dev)
1414 pci_dev_put(intel_private.bridge_dev); 1514 pci_dev_put(intel_private.bridge_dev);
1415 intel_private.driver = NULL;
1416} 1515}
1417EXPORT_SYMBOL(intel_gmch_remove); 1516EXPORT_SYMBOL(intel_gmch_remove);
1418 1517
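The sandybridge_gtt_driver added above has to squeeze a 40-bit DMA address into a 32-bit GTT entry: address bits 31:12 stay in place, bits 39:32 are folded into PTE bits 11:4 by the addr |= (addr >> 28) & 0xff0 line of gen6_write_entry(), and the low bits carry I810_PTE_VALID plus the cacheability flags (GEN6_PTE_LLC, optionally GEN6_PTE_GFDT). A minimal standalone sketch of that packing, with an illustrative name and stdint types standing in for the kernel's u32/u64:

#include <stdint.h>

/* Sketch of the PTE packing done by gen6_write_entry() above; only the
 * bit layout follows the hunk, the function name is made up. */
static uint32_t gen6_encode_pte(uint64_t dma_addr, uint32_t pte_flags)
{
	uint32_t pte = (uint32_t)dma_addr;		/* address bits 31:12 */

	pte |= (uint32_t)(dma_addr >> 28) & 0xff0;	/* bits 39:32 -> PTE bits 11:4 */
	return pte | pte_flags;				/* valid + cache-control bits */
}

The same 40-bit limit is why the new driver entry sets .dma_mask_size = 40, which intel_gmch_probe() feeds into pci_set_dma_mask(..., DMA_BIT_MASK(40)).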
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 62be3ec0da4..b9734a97818 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -332,8 +332,8 @@ static const struct agp_bridge_driver nvidia_driver = {
332 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 332 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
333}; 333};
334 334
335static int agp_nvidia_probe(struct pci_dev *pdev, 335static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
336 const struct pci_device_id *ent) 336 const struct pci_device_id *ent)
337{ 337{
338 struct agp_bridge_data *bridge; 338 struct agp_bridge_data *bridge;
339 u8 cap_ptr; 339 u8 cap_ptr;
@@ -388,7 +388,7 @@ static int agp_nvidia_probe(struct pci_dev *pdev,
388 return agp_add_bridge(bridge); 388 return agp_add_bridge(bridge);
389} 389}
390 390
391static void agp_nvidia_remove(struct pci_dev *pdev) 391static void __devexit agp_nvidia_remove(struct pci_dev *pdev)
392{ 392{
393 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 393 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
394 394
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 05b8d0241bd..ffa888cd1c8 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -158,6 +158,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
158 break; 158 break;
159 case LVL2_APER_SIZE: 159 case LVL2_APER_SIZE:
160 return -EINVAL; 160 return -EINVAL;
161 break;
161 default: 162 default:
162 num_entries = 0; 163 num_entries = 0;
163 break; 164 break;
@@ -270,7 +271,7 @@ const struct agp_bridge_driver sgi_tioca_driver = {
270 .num_aperture_sizes = 1, 271 .num_aperture_sizes = 1,
271}; 272};
272 273
273static int agp_sgi_init(void) 274static int __devinit agp_sgi_init(void)
274{ 275{
275 unsigned int j; 276 unsigned int j;
276 struct tioca_kernel *info; 277 struct tioca_kernel *info;
@@ -289,11 +290,12 @@ static int agp_sgi_init(void)
289 290
290 j = 0; 291 j = 0;
291 list_for_each_entry(info, &tioca_list, ca_list) { 292 list_for_each_entry(info, &tioca_list, ca_list) {
293 struct list_head *tmp;
292 if (list_empty(info->ca_devices)) 294 if (list_empty(info->ca_devices))
293 continue; 295 continue;
294 list_for_each_entry(pdev, info->ca_devices, bus_list) { 296 list_for_each(tmp, info->ca_devices) {
295 u8 cap_ptr; 297 u8 cap_ptr;
296 298 pdev = pci_dev_b(tmp);
297 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) 299 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
298 continue; 300 continue;
299 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 301 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
@@ -327,7 +329,7 @@ static int agp_sgi_init(void)
327 return 0; 329 return 0;
328} 330}
329 331
330static void agp_sgi_cleanup(void) 332static void __devexit agp_sgi_cleanup(void)
331{ 333{
332 kfree(sgi_tioca_agp_bridges); 334 kfree(sgi_tioca_agp_bridges);
333 sgi_tioca_agp_bridges = NULL; 335 sgi_tioca_agp_bridges = NULL;
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 79c838c434b..29aacd81de7 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -17,8 +17,8 @@
17#define PCI_DEVICE_ID_SI_662 0x0662 17#define PCI_DEVICE_ID_SI_662 0x0662
18#define PCI_DEVICE_ID_SI_671 0x0671 18#define PCI_DEVICE_ID_SI_671 0x0671
19 19
20static bool agp_sis_force_delay = 0; 20static int __devinitdata agp_sis_force_delay = 0;
21static int agp_sis_agp_spec = -1; 21static int __devinitdata agp_sis_agp_spec = -1;
22 22
23static int sis_fetch_size(void) 23static int sis_fetch_size(void)
24{ 24{
@@ -148,13 +148,13 @@ static struct agp_bridge_driver sis_driver = {
148}; 148};
149 149
150// chipsets that require the 'delay hack' 150// chipsets that require the 'delay hack'
151static int sis_broken_chipsets[] = { 151static int sis_broken_chipsets[] __devinitdata = {
152 PCI_DEVICE_ID_SI_648, 152 PCI_DEVICE_ID_SI_648,
153 PCI_DEVICE_ID_SI_746, 153 PCI_DEVICE_ID_SI_746,
154 0 // terminator 154 0 // terminator
155}; 155};
156 156
157static void sis_get_driver(struct agp_bridge_data *bridge) 157static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
158{ 158{
159 int i; 159 int i;
160 160
@@ -180,7 +180,8 @@ static void sis_get_driver(struct agp_bridge_data *bridge)
180} 180}
181 181
182 182
183static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 183static int __devinit agp_sis_probe(struct pci_dev *pdev,
184 const struct pci_device_id *ent)
184{ 185{
185 struct agp_bridge_data *bridge; 186 struct agp_bridge_data *bridge;
186 u8 cap_ptr; 187 u8 cap_ptr;
@@ -210,7 +211,7 @@ static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
210 return agp_add_bridge(bridge); 211 return agp_add_bridge(bridge);
211} 212}
212 213
213static void agp_sis_remove(struct pci_dev *pdev) 214static void __devexit agp_sis_remove(struct pci_dev *pdev)
214{ 215{
215 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 216 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
216 217
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 9b163b49d97..f02f9b07fd4 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -445,8 +445,8 @@ static const struct agp_bridge_driver sworks_driver = {
445 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 445 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
446}; 446};
447 447
448static int agp_serverworks_probe(struct pci_dev *pdev, 448static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
449 const struct pci_device_id *ent) 449 const struct pci_device_id *ent)
450{ 450{
451 struct agp_bridge_data *bridge; 451 struct agp_bridge_data *bridge;
452 struct pci_dev *bridge_dev; 452 struct pci_dev *bridge_dev;
@@ -518,7 +518,7 @@ static int agp_serverworks_probe(struct pci_dev *pdev,
518 return agp_add_bridge(bridge); 518 return agp_add_bridge(bridge);
519} 519}
520 520
521static void agp_serverworks_remove(struct pci_dev *pdev) 521static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
522{ 522{
523 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 523 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
524 524
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index a56ee9bedd1..a32c492baf5 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -557,7 +557,7 @@ const struct agp_bridge_driver u3_agp_driver = {
557 .needs_scratch_page = true, 557 .needs_scratch_page = true,
558}; 558};
559 559
560static struct agp_device_ids uninorth_agp_device_ids[] = { 560static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
561 { 561 {
562 .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP, 562 .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP,
563 .chipset_name = "UniNorth", 563 .chipset_name = "UniNorth",
@@ -592,8 +592,8 @@ static struct agp_device_ids uninorth_agp_device_ids[] = {
592 }, 592 },
593}; 593};
594 594
595static int agp_uninorth_probe(struct pci_dev *pdev, 595static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
596 const struct pci_device_id *ent) 596 const struct pci_device_id *ent)
597{ 597{
598 struct agp_device_ids *devs = uninorth_agp_device_ids; 598 struct agp_device_ids *devs = uninorth_agp_device_ids;
599 struct agp_bridge_data *bridge; 599 struct agp_bridge_data *bridge;
@@ -663,7 +663,7 @@ static int agp_uninorth_probe(struct pci_dev *pdev,
663 return agp_add_bridge(bridge); 663 return agp_add_bridge(bridge);
664} 664}
665 665
666static void agp_uninorth_remove(struct pci_dev *pdev) 666static void __devexit agp_uninorth_remove(struct pci_dev *pdev)
667{ 667{
668 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 668 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
669 669
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 74d3aa3773b..8bc38493740 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -224,7 +224,7 @@ static const struct agp_bridge_driver via_driver = {
224 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 224 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
225}; 225};
226 226
227static struct agp_device_ids via_agp_device_ids[] = 227static struct agp_device_ids via_agp_device_ids[] __devinitdata =
228{ 228{
229 { 229 {
230 .device_id = PCI_DEVICE_ID_VIA_82C597_0, 230 .device_id = PCI_DEVICE_ID_VIA_82C597_0,
@@ -438,7 +438,8 @@ static void check_via_agp3 (struct agp_bridge_data *bridge)
438} 438}
439 439
440 440
441static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 441static int __devinit agp_via_probe(struct pci_dev *pdev,
442 const struct pci_device_id *ent)
442{ 443{
443 struct agp_device_ids *devs = via_agp_device_ids; 444 struct agp_device_ids *devs = via_agp_device_ids;
444 struct agp_bridge_data *bridge; 445 struct agp_bridge_data *bridge;
@@ -484,7 +485,7 @@ static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
484 return agp_add_bridge(bridge); 485 return agp_add_bridge(bridge);
485} 486}
486 487
487static void agp_via_remove(struct pci_dev *pdev) 488static void __devexit agp_via_remove(struct pci_dev *pdev)
488{ 489{
489 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 490 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
490 491
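The AGP hunks above and below mostly restore the old hotplug section annotations: __devinit/__devexit on probe and remove routines, __devinitdata on tables only read at probe time, and __devexit_p() wherever a remove function is referenced from data that always stays resident. A simplified standalone sketch of what __devexit_p() buys (the real macros in include/linux/init.h of this era also attach the discardable section attributes):

#include <stddef.h>

#define CONFIG_HOTPLUG 1			/* assumed enabled for this sketch */

#ifdef CONFIG_HOTPLUG
#define __devexit_p(x)	(x)			/* devices can vanish at runtime, keep remove() */
#else
#define __devexit_p(x)	NULL			/* remove() text is discarded, don't point at it */
#endif

struct example_pci_driver {			/* stand-in for struct pci_driver */
	void (*remove)(void *pdev);
};

static void example_remove(void *pdev) { (void)pdev; }

static struct example_pci_driver example_driver = {
	.remove = __devexit_p(example_remove),
};

static void example_do_remove(void *pdev)
{
	if (example_driver.remove)		/* NULL when hotplug support is compiled out */
		example_driver.remove(pdev);
}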
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 46118f84594..a7346ab97a3 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -31,6 +31,7 @@
31#include <linux/kthread.h> 31#include <linux/kthread.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33 33
34#include <asm/system.h>
34 35
35/* 36/*
36 * The apm_bios device is one of the misc char devices. 37 * The apm_bios device is one of the misc char devices.
@@ -39,7 +40,10 @@
39#define APM_MINOR_DEV 134 40#define APM_MINOR_DEV 134
40 41
41/* 42/*
42 * One option can be changed at boot time as follows: 43 * See Documentation/Config.help for the configuration options.
44 *
45 * Various options can be changed at boot time as follows:
46 * (We allow underscores for compatibility with the modules code)
43 * apm=on/off enable/disable APM 47 * apm=on/off enable/disable APM
44 */ 48 */
45 49
@@ -296,13 +300,17 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg)
296 /* 300 /*
297 * Wait for the suspend/resume to complete. If there 301 * Wait for the suspend/resume to complete. If there
298 * are pending acknowledges, we wait here for them. 302 * are pending acknowledges, we wait here for them.
299 * wait_event_freezable() is interruptible and pending
300 * signal can cause busy looping. We aren't doing
301 * anything critical, chill a bit on each iteration.
302 */ 303 */
303 while (wait_event_freezable(apm_suspend_waitqueue, 304 freezer_do_not_count();
304 as->suspend_state != SUSPEND_ACKED)) 305
305 msleep(10); 306 wait_event(apm_suspend_waitqueue,
307 as->suspend_state == SUSPEND_DONE);
308
309 /*
310 * Since we are waiting until the suspend is done, the
311 * try_to_freeze() in freezer_count() will not trigger
312 */
313 freezer_count();
306 break; 314 break;
307 case SUSPEND_ACKTO: 315 case SUSPEND_ACKTO:
308 as->suspend_result = -ETIMEDOUT; 316 as->suspend_result = -ETIMEDOUT;
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 97467053a01..0c688232aab 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -297,6 +297,7 @@ static int __init bsr_init(void)
297 struct device_node *np; 297 struct device_node *np;
298 dev_t bsr_dev; 298 dev_t bsr_dev;
299 int ret = -ENODEV; 299 int ret = -ENODEV;
300 int result;
300 301
301 np = of_find_compatible_node(NULL, NULL, "ibm,bsr"); 302 np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
302 if (!np) 303 if (!np)
@@ -305,14 +306,13 @@ static int __init bsr_init(void)
305 bsr_class = class_create(THIS_MODULE, "bsr"); 306 bsr_class = class_create(THIS_MODULE, "bsr");
306 if (IS_ERR(bsr_class)) { 307 if (IS_ERR(bsr_class)) {
307 printk(KERN_ERR "class_create() failed for bsr_class\n"); 308 printk(KERN_ERR "class_create() failed for bsr_class\n");
308 ret = PTR_ERR(bsr_class);
309 goto out_err_1; 309 goto out_err_1;
310 } 310 }
311 bsr_class->dev_attrs = bsr_dev_attrs; 311 bsr_class->dev_attrs = bsr_dev_attrs;
312 312
313 ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr"); 313 result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
314 bsr_major = MAJOR(bsr_dev); 314 bsr_major = MAJOR(bsr_dev);
315 if (ret < 0) { 315 if (result < 0) {
316 printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n"); 316 printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
317 goto out_err_2; 317 goto out_err_2;
318 } 318 }
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index 7d34b203718..ed8303f9890 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -24,6 +24,7 @@
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/io.h> 25#include <linux/io.h>
26 26
27#include <asm/system.h>
27#include <asm/rtc.h> 28#include <asm/rtc.h>
28#if defined(CONFIG_M32R) 29#if defined(CONFIG_M32R)
29#include <asm/m32r.h> 30#include <asm/m32r.h>
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 24ffd8cec51..aab9605f0b4 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -74,21 +74,21 @@ static inline void netwinder_ds1620_reset(void)
74 74
75static inline void netwinder_lock(unsigned long *flags) 75static inline void netwinder_lock(unsigned long *flags)
76{ 76{
77 raw_spin_lock_irqsave(&nw_gpio_lock, *flags); 77 spin_lock_irqsave(&nw_gpio_lock, *flags);
78} 78}
79 79
80static inline void netwinder_unlock(unsigned long *flags) 80static inline void netwinder_unlock(unsigned long *flags)
81{ 81{
82 raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags); 82 spin_unlock_irqrestore(&nw_gpio_lock, *flags);
83} 83}
84 84
85static inline void netwinder_set_fan(int i) 85static inline void netwinder_set_fan(int i)
86{ 86{
87 unsigned long flags; 87 unsigned long flags;
88 88
89 raw_spin_lock_irqsave(&nw_gpio_lock, flags); 89 spin_lock_irqsave(&nw_gpio_lock, flags);
90 nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0); 90 nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
91 raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); 91 spin_unlock_irqrestore(&nw_gpio_lock, flags);
92} 92}
93 93
94static inline int netwinder_get_fan(void) 94static inline int netwinder_get_fan(void)
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index a082d00b0f1..53c524e7b82 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -37,6 +37,7 @@
37#include <linux/efi.h> 37#include <linux/efi.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39 39
40#include <asm/system.h>
40 41
41#define EFI_RTC_VERSION "0.4" 42#define EFI_RTC_VERSION "0.4"
42 43
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 21cb980f115..f773a9dd14f 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -56,6 +56,7 @@
56#include <linux/workqueue.h> 56#include <linux/workqueue.h>
57 57
58#include <asm/uaccess.h> 58#include <asm/uaccess.h>
59#include <asm/system.h>
59#include <asm/rtc.h> 60#include <asm/rtc.h>
60 61
61/* 62/*
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index fe6d4be4829..0833896cf6f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -36,6 +36,7 @@
36#include <linux/io.h> 36#include <linux/io.h>
37 37
38#include <asm/current.h> 38#include <asm/current.h>
39#include <asm/system.h>
39#include <asm/irq.h> 40#include <asm/irq.h>
40#include <asm/div64.h> 41#include <asm/div64.h>
41 42
@@ -816,7 +817,7 @@ static unsigned long __hpet_calibrate(struct hpets *hpetp)
816 817
817static unsigned long hpet_calibrate(struct hpets *hpetp) 818static unsigned long hpet_calibrate(struct hpets *hpetp)
818{ 819{
819 unsigned long ret = ~0UL; 820 unsigned long ret = -1;
820 unsigned long tmp; 821 unsigned long tmp;
821 822
822 /* 823 /*
@@ -906,8 +907,8 @@ int hpet_alloc(struct hpet_data *hdp)
906 hpetp->hp_which, hdp->hd_phys_address, 907 hpetp->hp_which, hdp->hd_phys_address,
907 hpetp->hp_ntimer > 1 ? "s" : ""); 908 hpetp->hp_ntimer > 1 ? "s" : "");
908 for (i = 0; i < hpetp->hp_ntimer; i++) 909 for (i = 0; i < hpetp->hp_ntimer; i++)
909 printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); 910 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
910 printk(KERN_CONT "\n"); 911 printk("\n");
911 912
912 temp = hpetp->hp_tick_freq; 913 temp = hpetp->hp_tick_freq;
913 remainder = do_div(temp, 1000000); 914 remainder = do_div(temp, 1000000);
@@ -1001,9 +1002,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
1001 irqp = &res->data.extended_irq; 1002 irqp = &res->data.extended_irq;
1002 1003
1003 for (i = 0; i < irqp->interrupt_count; i++) { 1004 for (i = 0; i < irqp->interrupt_count; i++) {
1004 if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
1005 break;
1006
1007 irq = acpi_register_gsi(NULL, irqp->interrupts[i], 1005 irq = acpi_register_gsi(NULL, irqp->interrupts[i],
1008 irqp->triggering, irqp->polarity); 1006 irqp->triggering, irqp->polarity);
1009 if (irq < 0) 1007 if (irq < 0)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c5a0262251b..1d2ebc7a494 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -60,33 +60,6 @@ config HW_RANDOM_AMD
60 60
61 If unsure, say Y. 61 If unsure, say Y.
62 62
63config HW_RANDOM_ATMEL
64 tristate "Atmel Random Number Generator support"
65 depends on HW_RANDOM && HAVE_CLK
66 default (HW_RANDOM && ARCH_AT91)
67 ---help---
68 This driver provides kernel-side support for the Random Number
69 Generator hardware found on Atmel AT91 devices.
70
71 To compile this driver as a module, choose M here: the
72 module will be called atmel-rng.
73
74 If unsure, say Y.
75
76config HW_RANDOM_BCM63XX
77 tristate "Broadcom BCM63xx Random Number Generator support"
78 depends on HW_RANDOM && BCM63XX
79 default HW_RANDOM
80 ---help---
81 This driver provides kernel-side support for the Random Number
82 Generator hardware found on the Broadcom BCM63xx SoCs.
83
84 To compile this driver as a module, choose M here: the
85 module will be called bcm63xx-rng
86
 87	  If unsure, say Y.
88
89
90config HW_RANDOM_GEODE 63config HW_RANDOM_GEODE
91 tristate "AMD Geode HW Random Number Generator support" 64 tristate "AMD Geode HW Random Number Generator support"
92 depends on HW_RANDOM && X86_32 && PCI 65 depends on HW_RANDOM && X86_32 && PCI
@@ -127,12 +100,12 @@ config HW_RANDOM_VIA
127 If unsure, say Y. 100 If unsure, say Y.
128 101
129config HW_RANDOM_IXP4XX 102config HW_RANDOM_IXP4XX
130 tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" 103 tristate "Intel IXP4xx NPU HW Random Number Generator support"
131 depends on HW_RANDOM && ARCH_IXP4XX 104 depends on HW_RANDOM && ARCH_IXP4XX
132 default HW_RANDOM 105 default HW_RANDOM
133 ---help--- 106 ---help---
134 This driver provides kernel-side support for the Pseudo-Random 107 This driver provides kernel-side support for the Random
135 Number Generator hardware found on the Intel IXP45x/46x NPU. 108 Number Generator hardware found on the Intel IXP4xx NPU.
136 109
137 To compile this driver as a module, choose M here: the 110 To compile this driver as a module, choose M here: the
138 module will be called ixp4xx-rng. 111 module will be called ixp4xx-rng.
@@ -216,7 +189,7 @@ config HW_RANDOM_MXC_RNGA
216 189
217config HW_RANDOM_NOMADIK 190config HW_RANDOM_NOMADIK
218 tristate "ST-Ericsson Nomadik Random Number Generator support" 191 tristate "ST-Ericsson Nomadik Random Number Generator support"
219 depends on HW_RANDOM && ARCH_NOMADIK 192 depends on HW_RANDOM && PLAT_NOMADIK
220 ---help--- 193 ---help---
221 This driver provides kernel-side support for the Random Number 194 This driver provides kernel-side support for the Random Number
222 Generator hardware found on ST-Ericsson SoCs (8815 and 8500). 195 Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
@@ -249,56 +222,3 @@ config HW_RANDOM_PPC4XX
249 module will be called ppc4xx-rng. 222 module will be called ppc4xx-rng.
250 223
251 If unsure, say N. 224 If unsure, say N.
252
253config UML_RANDOM
254 depends on UML
255 tristate "Hardware random number generator"
256 help
257 This option enables UML's "hardware" random number generator. It
258 attaches itself to the host's /dev/random, supplying as much entropy
259 as the host has, rather than the small amount the UML gets from its
260 own drivers. It registers itself as a standard hardware random number
261 generator, major 10, minor 183, and the canonical device name is
262 /dev/hwrng.
263 The way to make use of this is to install the rng-tools package
264 (check your distro, or download from
265 http://sourceforge.net/projects/gkernel/). rngd periodically reads
266 /dev/hwrng and injects the entropy into /dev/random.
267
268config HW_RANDOM_PSERIES
269 tristate "pSeries HW Random Number Generator support"
270 depends on HW_RANDOM && PPC64 && IBMVIO
271 default HW_RANDOM
272 ---help---
273 This driver provides kernel-side support for the Random Number
274 Generator hardware found on POWER7+ machines and above
275
276 To compile this driver as a module, choose M here: the
277 module will be called pseries-rng.
278
279 If unsure, say Y.
280
281config HW_RANDOM_EXYNOS
282 tristate "EXYNOS HW random number generator support"
283 depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
284 ---help---
285 This driver provides kernel-side support for the Random Number
286 Generator hardware found on EXYNOS SOCs.
287
288 To compile this driver as a module, choose M here: the
289 module will be called exynos-rng.
290
291 If unsure, say Y.
292
293config HW_RANDOM_TPM
294 tristate "TPM HW Random Number Generator support"
295 depends on HW_RANDOM && TCG_TPM
296 default HW_RANDOM
297 ---help---
298 This driver provides kernel-side support for the Random Number
299 Generator in the Trusted Platform Module
300
301 To compile this driver as a module, choose M here: the
302 module will be called tpm-rng.
303
304 If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 1fd7eec9fbf..c88f244c8a7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -7,8 +7,6 @@ rng-core-y := core.o
7obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o 7obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
8obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o 8obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
9obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o 9obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
10obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
11obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
12obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o 10obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
13obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o 11obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
14n2-rng-y := n2-drv.o n2-asm.o 12n2-rng-y := n2-drv.o n2-asm.o
@@ -23,6 +21,3 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
23obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o 21obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
24obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o 22obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
25obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o 23obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
26obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
27obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
28obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
deleted file mode 100644
index 7c73d4aca36..00000000000
--- a/drivers/char/hw_random/atmel-rng.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * Copyright (c) 2011 Peter Korsgaard <jacmet@sunsite.dk>
3 *
4 * This file is licensed under the terms of the GNU General Public
5 * License version 2. This program is licensed "as is" without any
6 * warranty of any kind, whether express or implied.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/err.h>
13#include <linux/clk.h>
14#include <linux/io.h>
15#include <linux/hw_random.h>
16#include <linux/platform_device.h>
17
18#define TRNG_CR 0x00
19#define TRNG_ISR 0x1c
20#define TRNG_ODATA 0x50
21
22#define TRNG_KEY 0x524e4700 /* RNG */
23
24struct atmel_trng {
25 struct clk *clk;
26 void __iomem *base;
27 struct hwrng rng;
28};
29
30static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
31 bool wait)
32{
33 struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
34 u32 *data = buf;
35
36 /* data ready? */
37 if (readl(trng->base + TRNG_ISR) & 1) {
38 *data = readl(trng->base + TRNG_ODATA);
39 /*
40 ensure data ready is only set again AFTER the next data
41 word is ready in case it got set between checking ISR
42 and reading ODATA, so we don't risk re-reading the
43 same word
44 */
45 readl(trng->base + TRNG_ISR);
46 return 4;
47 } else
48 return 0;
49}
50
51static int atmel_trng_probe(struct platform_device *pdev)
52{
53 struct atmel_trng *trng;
54 struct resource *res;
55 int ret;
56
57 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
58 if (!res)
59 return -EINVAL;
60
61 trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
62 if (!trng)
63 return -ENOMEM;
64
65 if (!devm_request_mem_region(&pdev->dev, res->start,
66 resource_size(res), pdev->name))
67 return -EBUSY;
68
69 trng->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
70 if (!trng->base)
71 return -EBUSY;
72
73 trng->clk = clk_get(&pdev->dev, NULL);
74 if (IS_ERR(trng->clk))
75 return PTR_ERR(trng->clk);
76
77 ret = clk_enable(trng->clk);
78 if (ret)
79 goto err_enable;
80
81 writel(TRNG_KEY | 1, trng->base + TRNG_CR);
82 trng->rng.name = pdev->name;
83 trng->rng.read = atmel_trng_read;
84
85 ret = hwrng_register(&trng->rng);
86 if (ret)
87 goto err_register;
88
89 platform_set_drvdata(pdev, trng);
90
91 return 0;
92
93err_register:
94 clk_disable(trng->clk);
95err_enable:
96 clk_put(trng->clk);
97
98 return ret;
99}
100
101static int atmel_trng_remove(struct platform_device *pdev)
102{
103 struct atmel_trng *trng = platform_get_drvdata(pdev);
104
105 hwrng_unregister(&trng->rng);
106
107 writel(TRNG_KEY, trng->base + TRNG_CR);
108 clk_disable(trng->clk);
109 clk_put(trng->clk);
110
111 platform_set_drvdata(pdev, NULL);
112
113 return 0;
114}
115
116#ifdef CONFIG_PM
117static int atmel_trng_suspend(struct device *dev)
118{
119 struct atmel_trng *trng = dev_get_drvdata(dev);
120
121 clk_disable(trng->clk);
122
123 return 0;
124}
125
126static int atmel_trng_resume(struct device *dev)
127{
128 struct atmel_trng *trng = dev_get_drvdata(dev);
129
130 return clk_enable(trng->clk);
131}
132
133static const struct dev_pm_ops atmel_trng_pm_ops = {
134 .suspend = atmel_trng_suspend,
135 .resume = atmel_trng_resume,
136};
137#endif /* CONFIG_PM */
138
139static struct platform_driver atmel_trng_driver = {
140 .probe = atmel_trng_probe,
141 .remove = atmel_trng_remove,
142 .driver = {
143 .name = "atmel-trng",
144 .owner = THIS_MODULE,
145#ifdef CONFIG_PM
146 .pm = &atmel_trng_pm_ops,
147#endif /* CONFIG_PM */
148 },
149};
150
151module_platform_driver(atmel_trng_driver);
152
153MODULE_LICENSE("GPL");
154MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
155MODULE_DESCRIPTION("Atmel true random number generator driver");
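The deleted atmel-rng.c above is a compact example of the hwrng contract every driver in this directory implements: fill in a struct hwrng with a name and a read() callback (older drivers such as bcm63xx-rng.c below use the data_present()/data_read() pair instead), then hand it to hwrng_register() so the core can expose it through /dev/hwrng. A minimal sketch of such a driver, not taken from this patch, with purely illustrative names and a fake data source:

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	u32 sample = 0x12345678;	/* placeholder; a real driver reads its FIFO here */

	if (max < sizeof(sample))
		return 0;
	memcpy(buf, &sample, sizeof(sample));
	return sizeof(sample);		/* number of bytes placed in buf */
}

static struct hwrng example_rng = {
	.name	= "example-rng",
	.read	= example_rng_read,
};

static int __init example_rng_init(void)
{
	return hwrng_register(&example_rng);
}

static void __exit example_rng_exit(void)
{
	hwrng_unregister(&example_rng);
}

module_init(example_rng_init);
module_exit(example_rng_exit);
MODULE_LICENSE("GPL");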
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
deleted file mode 100644
index f343b7d0dfa..00000000000
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * Broadcom BCM63xx Random Number Generator support
3 *
4 * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2009, Broadcom Corporation
6 *
7 */
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/io.h>
11#include <linux/err.h>
12#include <linux/clk.h>
13#include <linux/platform_device.h>
14#include <linux/hw_random.h>
15
16#include <bcm63xx_io.h>
17#include <bcm63xx_regs.h>
18
19struct bcm63xx_rng_priv {
20 struct clk *clk;
21 void __iomem *regs;
22};
23
24#define to_rng_priv(rng) ((struct bcm63xx_rng_priv *)rng->priv)
25
26static int bcm63xx_rng_init(struct hwrng *rng)
27{
28 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
29 u32 val;
30
31 val = bcm_readl(priv->regs + RNG_CTRL);
32 val |= RNG_EN;
33 bcm_writel(val, priv->regs + RNG_CTRL);
34
35 return 0;
36}
37
38static void bcm63xx_rng_cleanup(struct hwrng *rng)
39{
40 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
41 u32 val;
42
43 val = bcm_readl(priv->regs + RNG_CTRL);
44 val &= ~RNG_EN;
45 bcm_writel(val, priv->regs + RNG_CTRL);
46}
47
48static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
49{
50 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
51
52 return bcm_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
53}
54
55static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
56{
57 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
58
59 *data = bcm_readl(priv->regs + RNG_DATA);
60
61 return 4;
62}
63
64static int bcm63xx_rng_probe(struct platform_device *pdev)
65{
66 struct resource *r;
67 struct clk *clk;
68 int ret;
69 struct bcm63xx_rng_priv *priv;
70 struct hwrng *rng;
71
72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73 if (!r) {
74 dev_err(&pdev->dev, "no iomem resource\n");
75 ret = -ENXIO;
76 goto out;
77 }
78
79 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
80 if (!priv) {
81 dev_err(&pdev->dev, "no memory for private structure\n");
82 ret = -ENOMEM;
83 goto out;
84 }
85
86 rng = kzalloc(sizeof(*rng), GFP_KERNEL);
87 if (!rng) {
88 dev_err(&pdev->dev, "no memory for rng structure\n");
89 ret = -ENOMEM;
90 goto out_free_priv;
91 }
92
93 platform_set_drvdata(pdev, rng);
94 rng->priv = (unsigned long)priv;
95 rng->name = pdev->name;
96 rng->init = bcm63xx_rng_init;
97 rng->cleanup = bcm63xx_rng_cleanup;
98 rng->data_present = bcm63xx_rng_data_present;
99 rng->data_read = bcm63xx_rng_data_read;
100
101 clk = clk_get(&pdev->dev, "ipsec");
102 if (IS_ERR(clk)) {
103 dev_err(&pdev->dev, "no clock for device\n");
104 ret = PTR_ERR(clk);
105 goto out_free_rng;
106 }
107
108 priv->clk = clk;
109
110 if (!devm_request_mem_region(&pdev->dev, r->start,
111 resource_size(r), pdev->name)) {
112 dev_err(&pdev->dev, "request mem failed");
113 ret = -ENOMEM;
114 goto out_free_rng;
115 }
116
117 priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
118 resource_size(r));
119 if (!priv->regs) {
120 dev_err(&pdev->dev, "ioremap failed");
121 ret = -ENOMEM;
122 goto out_free_rng;
123 }
124
125 clk_enable(clk);
126
127 ret = hwrng_register(rng);
128 if (ret) {
129 dev_err(&pdev->dev, "failed to register rng device\n");
130 goto out_clk_disable;
131 }
132
133 dev_info(&pdev->dev, "registered RNG driver\n");
134
135 return 0;
136
137out_clk_disable:
138 clk_disable(clk);
139out_free_rng:
140 platform_set_drvdata(pdev, NULL);
141 kfree(rng);
142out_free_priv:
143 kfree(priv);
144out:
145 return ret;
146}
147
148static int bcm63xx_rng_remove(struct platform_device *pdev)
149{
150 struct hwrng *rng = platform_get_drvdata(pdev);
151 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
152
153 hwrng_unregister(rng);
154 clk_disable(priv->clk);
155 kfree(priv);
156 kfree(rng);
157 platform_set_drvdata(pdev, NULL);
158
159 return 0;
160}
161
162static struct platform_driver bcm63xx_rng_driver = {
163 .probe = bcm63xx_rng_probe,
164 .remove = bcm63xx_rng_remove,
165 .driver = {
166 .name = "bcm63xx-rng",
167 .owner = THIS_MODULE,
168 },
169};
170
171module_platform_driver(bcm63xx_rng_driver);
172
173MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
174MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
175MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
deleted file mode 100644
index 48bbfeca4b5..00000000000
--- a/drivers/char/hw_random/exynos-rng.c
+++ /dev/null
@@ -1,182 +0,0 @@
1/*
2 * exynos-rng.c - Random Number Generator driver for the exynos
3 *
4 * Copyright (C) 2012 Samsung Electronics
5 * Jonghwa Lee <jonghwa3.lee@smasung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/hw_random.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28#include <linux/clk.h>
29#include <linux/pm_runtime.h>
30#include <linux/err.h>
31
32#define EXYNOS_PRNG_STATUS_OFFSET 0x10
33#define EXYNOS_PRNG_SEED_OFFSET 0x140
34#define EXYNOS_PRNG_OUT1_OFFSET 0x160
35#define SEED_SETTING_DONE BIT(1)
36#define PRNG_START 0x18
37#define PRNG_DONE BIT(5)
38#define EXYNOS_AUTOSUSPEND_DELAY 100
39
40struct exynos_rng {
41 struct device *dev;
42 struct hwrng rng;
43 void __iomem *mem;
44 struct clk *clk;
45};
46
47static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
48{
49 return __raw_readl(rng->mem + offset);
50}
51
52static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
53{
54 __raw_writel(val, rng->mem + offset);
55}
56
57static int exynos_init(struct hwrng *rng)
58{
59 struct exynos_rng *exynos_rng = container_of(rng,
60 struct exynos_rng, rng);
61 int i;
62 int ret = 0;
63
64 pm_runtime_get_sync(exynos_rng->dev);
65
66 for (i = 0 ; i < 5 ; i++)
67 exynos_rng_writel(exynos_rng, jiffies,
68 EXYNOS_PRNG_SEED_OFFSET + 4*i);
69
70 if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET)
71 & SEED_SETTING_DONE))
72 ret = -EIO;
73
74 pm_runtime_put_noidle(exynos_rng->dev);
75
76 return ret;
77}
78
79static int exynos_read(struct hwrng *rng, void *buf,
80 size_t max, bool wait)
81{
82 struct exynos_rng *exynos_rng = container_of(rng,
83 struct exynos_rng, rng);
84 u32 *data = buf;
85
86 pm_runtime_get_sync(exynos_rng->dev);
87
88 exynos_rng_writel(exynos_rng, PRNG_START, 0);
89
90 while (!(exynos_rng_readl(exynos_rng,
91 EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE))
92 cpu_relax();
93
94 exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
95
96 *data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
97
98 pm_runtime_mark_last_busy(exynos_rng->dev);
99 pm_runtime_autosuspend(exynos_rng->dev);
100
101 return 4;
102}
103
104static int exynos_rng_probe(struct platform_device *pdev)
105{
106 struct exynos_rng *exynos_rng;
107
108 exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
109 GFP_KERNEL);
110 if (!exynos_rng)
111 return -ENOMEM;
112
113 exynos_rng->dev = &pdev->dev;
114 exynos_rng->rng.name = "exynos";
115 exynos_rng->rng.init = exynos_init;
116 exynos_rng->rng.read = exynos_read;
117 exynos_rng->clk = devm_clk_get(&pdev->dev, "secss");
118 if (IS_ERR(exynos_rng->clk)) {
119 dev_err(&pdev->dev, "Couldn't get clock.\n");
120 return -ENOENT;
121 }
122
123 exynos_rng->mem = devm_request_and_ioremap(&pdev->dev,
124 platform_get_resource(pdev, IORESOURCE_MEM, 0));
125 if (!exynos_rng->mem)
126 return -EBUSY;
127
128 platform_set_drvdata(pdev, exynos_rng);
129
130 pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY);
131 pm_runtime_use_autosuspend(&pdev->dev);
132 pm_runtime_enable(&pdev->dev);
133
134 return hwrng_register(&exynos_rng->rng);
135}
136
137static int exynos_rng_remove(struct platform_device *pdev)
138{
139 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
140
141 hwrng_unregister(&exynos_rng->rng);
142
143 return 0;
144}
145
146static int exynos_rng_runtime_suspend(struct device *dev)
147{
148 struct platform_device *pdev = to_platform_device(dev);
149 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
150
151 clk_disable_unprepare(exynos_rng->clk);
152
153 return 0;
154}
155
156static int exynos_rng_runtime_resume(struct device *dev)
157{
158 struct platform_device *pdev = to_platform_device(dev);
159 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
160
161 return clk_prepare_enable(exynos_rng->clk);
162}
163
164
165UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
166 exynos_rng_runtime_resume, NULL);
167
168static struct platform_driver exynos_rng_driver = {
169 .driver = {
170 .name = "exynos-rng",
171 .owner = THIS_MODULE,
172 .pm = &exynos_rng_pm_ops,
173 },
174 .probe = exynos_rng_probe,
175 .remove = exynos_rng_remove,
176};
177
178module_platform_driver(exynos_rng_driver);
179
180MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver");
181MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
182MODULE_LICENSE("GPL");
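exynos-rng.c, deleted above, also illustrates the runtime-PM autosuspend idiom: probe() configures an idle delay and enables runtime PM, and each hardware access is bracketed by a get and a deferred put so the core can power the block down once it has sat idle long enough. A sketch of that pattern in its commonly documented form (the deleted driver itself pairs pm_runtime_get_sync() with mark_last_busy()/autosuspend()); names here are illustrative:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void example_enable_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 100);	/* ms of idle before suspending */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void example_touch_hardware(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resume the block if it was suspended */

	/* ... access the hardware registers here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference, suspend after the delay */
}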
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index beec1627db3..263567f5f39 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -45,9 +45,6 @@ static int __init ixp4xx_rng_init(void)
45 void __iomem * rng_base; 45 void __iomem * rng_base;
46 int err; 46 int err;
47 47
48 if (!cpu_is_ixp46x()) /* includes IXP455 */
49 return -ENOSYS;
50
51 rng_base = ioremap(0x70002100, 4); 48 rng_base = ioremap(0x70002100, 4);
52 if (!rng_base) 49 if (!rng_base)
53 return -ENOMEM; 50 return -ENOMEM;
@@ -71,5 +68,5 @@ module_init(ixp4xx_rng_init);
71module_exit(ixp4xx_rng_exit); 68module_exit(ixp4xx_rng_exit);
72 69
73MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); 70MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
74MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x"); 71MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
75MODULE_LICENSE("GPL"); 72MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index f05d85713fd..187c6be80f4 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -24,7 +24,6 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/hw_random.h> 26#include <linux/hw_random.h>
27#include <linux/delay.h>
28#include <linux/io.h> 27#include <linux/io.h>
29 28
30/* RNGA Registers */ 29/* RNGA Registers */
@@ -59,47 +58,38 @@
59#define RNGA_STATUS_LAST_READ_STATUS 0x00000002 58#define RNGA_STATUS_LAST_READ_STATUS 0x00000002
60#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001 59#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001
61 60
62struct mxc_rng { 61static struct platform_device *rng_dev;
63 struct device *dev;
64 struct hwrng rng;
65 void __iomem *mem;
66 struct clk *clk;
67};
68 62
69static int mxc_rnga_data_present(struct hwrng *rng, int wait) 63static int mxc_rnga_data_present(struct hwrng *rng)
70{ 64{
71 int i; 65 int level;
72 struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); 66 void __iomem *rng_base = (void __iomem *)rng->priv;
73 67
74 for (i = 0; i < 20; i++) { 68 /* how many random numbers is in FIFO? [0-16] */
75 /* how many random numbers are in FIFO? [0-16] */ 69 level = ((__raw_readl(rng_base + RNGA_STATUS) &
76 int level = (__raw_readl(mxc_rng->mem + RNGA_STATUS) & 70 RNGA_STATUS_LEVEL_MASK) >> 8);
77 RNGA_STATUS_LEVEL_MASK) >> 8; 71
78 if (level || !wait) 72 return level > 0 ? 1 : 0;
79 return !!level;
80 udelay(10);
81 }
82 return 0;
83} 73}
84 74
85static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) 75static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
86{ 76{
87 int err; 77 int err;
88 u32 ctrl; 78 u32 ctrl;
89 struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); 79 void __iomem *rng_base = (void __iomem *)rng->priv;
90 80
91 /* retrieve a random number from FIFO */ 81 /* retrieve a random number from FIFO */
92 *data = __raw_readl(mxc_rng->mem + RNGA_OUTPUT_FIFO); 82 *data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO);
93 83
94 /* some error while reading this random number? */ 84 /* some error while reading this random number? */
95 err = __raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_ERROR_INT; 85 err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
96 86
97 /* if error: clear error interrupt, but doesn't return random number */ 87 /* if error: clear error interrupt, but doesn't return random number */
98 if (err) { 88 if (err) {
99 dev_dbg(mxc_rng->dev, "Error while reading random number!\n"); 89 dev_dbg(&rng_dev->dev, "Error while reading random number!\n");
100 ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); 90 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
101 __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT, 91 __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
102 mxc_rng->mem + RNGA_CONTROL); 92 rng_base + RNGA_CONTROL);
103 return 0; 93 return 0;
104 } else 94 } else
105 return 4; 95 return 4;
@@ -108,22 +98,22 @@ static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
108static int mxc_rnga_init(struct hwrng *rng) 98static int mxc_rnga_init(struct hwrng *rng)
109{ 99{
110 u32 ctrl, osc; 100 u32 ctrl, osc;
111 struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); 101 void __iomem *rng_base = (void __iomem *)rng->priv;
112 102
113 /* wake up */ 103 /* wake up */
114 ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); 104 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
115 __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, mxc_rng->mem + RNGA_CONTROL); 105 __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL);
116 106
117 /* verify if oscillator is working */ 107 /* verify if oscillator is working */
118 osc = __raw_readl(mxc_rng->mem + RNGA_STATUS); 108 osc = __raw_readl(rng_base + RNGA_STATUS);
119 if (osc & RNGA_STATUS_OSC_DEAD) { 109 if (osc & RNGA_STATUS_OSC_DEAD) {
120 dev_err(mxc_rng->dev, "RNGA Oscillator is dead!\n"); 110 dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n");
121 return -ENODEV; 111 return -ENODEV;
122 } 112 }
123 113
124 /* go running */ 114 /* go running */
125 ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); 115 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
126 __raw_writel(ctrl | RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); 116 __raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
127 117
128 return 0; 118 return 0;
129} 119}
@@ -131,40 +121,40 @@ static int mxc_rnga_init(struct hwrng *rng)
131static void mxc_rnga_cleanup(struct hwrng *rng) 121static void mxc_rnga_cleanup(struct hwrng *rng)
132{ 122{
133 u32 ctrl; 123 u32 ctrl;
134 struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); 124 void __iomem *rng_base = (void __iomem *)rng->priv;
135 125
136 ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); 126 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
137 127
138 /* stop rnga */ 128 /* stop rnga */
139 __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); 129 __raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
140} 130}
141 131
132static struct hwrng mxc_rnga = {
133 .name = "mxc-rnga",
134 .init = mxc_rnga_init,
135 .cleanup = mxc_rnga_cleanup,
136 .data_present = mxc_rnga_data_present,
137 .data_read = mxc_rnga_data_read
138};
139
142static int __init mxc_rnga_probe(struct platform_device *pdev) 140static int __init mxc_rnga_probe(struct platform_device *pdev)
143{ 141{
144 int err = -ENODEV; 142 int err = -ENODEV;
143 struct clk *clk;
145 struct resource *res, *mem; 144 struct resource *res, *mem;
146 struct mxc_rng *mxc_rng; 145 void __iomem *rng_base = NULL;
147 146
148 mxc_rng = devm_kzalloc(&pdev->dev, sizeof(struct mxc_rng), 147 if (rng_dev)
149 GFP_KERNEL); 148 return -EBUSY;
150 if (!mxc_rng) 149
151 return -ENOMEM; 150 clk = clk_get(&pdev->dev, "rng");
152 151 if (IS_ERR(clk)) {
153 mxc_rng->dev = &pdev->dev;
154 mxc_rng->rng.name = "mxc-rnga";
155 mxc_rng->rng.init = mxc_rnga_init;
156 mxc_rng->rng.cleanup = mxc_rnga_cleanup,
157 mxc_rng->rng.data_present = mxc_rnga_data_present,
158 mxc_rng->rng.data_read = mxc_rnga_data_read,
159
160 mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
161 if (IS_ERR(mxc_rng->clk)) {
162 dev_err(&pdev->dev, "Could not get rng_clk!\n"); 152 dev_err(&pdev->dev, "Could not get rng_clk!\n");
163 err = PTR_ERR(mxc_rng->clk); 153 err = PTR_ERR(clk);
164 goto out; 154 goto out;
165 } 155 }
166 156
167 clk_prepare_enable(mxc_rng->clk); 157 clk_enable(clk);
168 158
169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 159 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
170 if (!res) { 160 if (!res) {
@@ -178,27 +168,36 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
178 goto err_region; 168 goto err_region;
179 } 169 }
180 170
181 mxc_rng->mem = ioremap(res->start, resource_size(res)); 171 rng_base = ioremap(res->start, resource_size(res));
182 if (!mxc_rng->mem) { 172 if (!rng_base) {
183 err = -ENOMEM; 173 err = -ENOMEM;
184 goto err_ioremap; 174 goto err_ioremap;
185 } 175 }
186 176
187 err = hwrng_register(&mxc_rng->rng); 177 mxc_rnga.priv = (unsigned long)rng_base;
178
179 err = hwrng_register(&mxc_rnga);
188 if (err) { 180 if (err) {
189 dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); 181 dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
190 goto err_ioremap; 182 goto err_register;
191 } 183 }
192 184
185 rng_dev = pdev;
186
193 dev_info(&pdev->dev, "MXC RNGA Registered.\n"); 187 dev_info(&pdev->dev, "MXC RNGA Registered.\n");
194 188
195 return 0; 189 return 0;
196 190
191err_register:
192 iounmap(rng_base);
193 rng_base = NULL;
194
197err_ioremap: 195err_ioremap:
198 release_mem_region(res->start, resource_size(res)); 196 release_mem_region(res->start, resource_size(res));
199 197
200err_region: 198err_region:
201 clk_disable_unprepare(mxc_rng->clk); 199 clk_disable(clk);
200 clk_put(clk);
202 201
203out: 202out:
204 return err; 203 return err;
@@ -207,15 +206,17 @@ out:
207static int __exit mxc_rnga_remove(struct platform_device *pdev) 206static int __exit mxc_rnga_remove(struct platform_device *pdev)
208{ 207{
209 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 208 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
210 struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); 209 void __iomem *rng_base = (void __iomem *)mxc_rnga.priv;
210 struct clk *clk = clk_get(&pdev->dev, "rng");
211 211
212 hwrng_unregister(&mxc_rng->rng); 212 hwrng_unregister(&mxc_rnga);
213 213
214 iounmap(mxc_rng->mem); 214 iounmap(rng_base);
215 215
216 release_mem_region(res->start, resource_size(res)); 216 release_mem_region(res->start, resource_size(res));
217 217
218 clk_disable_unprepare(mxc_rng->clk); 218 clk_disable(clk);
219 clk_put(clk);
219 220
220 return 0; 221 return 0;
221} 222}
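The mxc-rnga.c hunks replace a per-device struct mxc_rng, recovered from the embedded struct hwrng via container_of(), with a file-scope hwrng and globals reached through rng->priv. The container_of() idiom being dropped is the interesting part; a standalone sketch with stub types (the kernel macro additionally type-checks the member):

#include <stddef.h>

struct hwrng_stub { const char *name; };	/* stand-in for struct hwrng */

struct mxc_rng_stub {
	void *mem;				/* MMIO base in the real driver */
	struct hwrng_stub rng;			/* embedded object registered with the core */
};

/* Simplified container_of(): step back from the member to its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void *mxc_mem_from_rng(struct hwrng_stub *rng)
{
	struct mxc_rng_stub *priv = container_of(rng, struct mxc_rng_stub, rng);

	return priv->mem;			/* how the removed callbacks found their registers */
}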
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 20b962e1d83..c3de70de00d 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -25,7 +25,7 @@
25#define DRV_MODULE_VERSION "0.2" 25#define DRV_MODULE_VERSION "0.2"
26#define DRV_MODULE_RELDATE "July 27, 2011" 26#define DRV_MODULE_RELDATE "July 27, 2011"
27 27
28static char version[] = 28static char version[] __devinitdata =
29 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 29 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
30 30
31MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 31MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -611,7 +611,7 @@ static void n2rng_work(struct work_struct *work)
611 schedule_delayed_work(&np->work, HZ * 2); 611 schedule_delayed_work(&np->work, HZ * 2);
612} 612}
613 613
614static void n2rng_driver_version(void) 614static void __devinit n2rng_driver_version(void)
615{ 615{
616 static int n2rng_version_printed; 616 static int n2rng_version_printed;
617 617
@@ -620,7 +620,7 @@ static void n2rng_driver_version(void)
620} 620}
621 621
622static const struct of_device_id n2rng_match[]; 622static const struct of_device_id n2rng_match[];
623static int n2rng_probe(struct platform_device *op) 623static int __devinit n2rng_probe(struct platform_device *op)
624{ 624{
625 const struct of_device_id *match; 625 const struct of_device_id *match;
626 int multi_capable; 626 int multi_capable;
@@ -719,7 +719,7 @@ out:
719 return err; 719 return err;
720} 720}
721 721
722static int n2rng_remove(struct platform_device *op) 722static int __devexit n2rng_remove(struct platform_device *op)
723{ 723{
724 struct n2rng *np = dev_get_drvdata(&op->dev); 724 struct n2rng *np = dev_get_drvdata(&op->dev);
725 725
@@ -767,7 +767,18 @@ static struct platform_driver n2rng_driver = {
767 .of_match_table = n2rng_match, 767 .of_match_table = n2rng_match,
768 }, 768 },
769 .probe = n2rng_probe, 769 .probe = n2rng_probe,
770 .remove = n2rng_remove, 770 .remove = __devexit_p(n2rng_remove),
771}; 771};
772 772
773module_platform_driver(n2rng_driver); 773static int __init n2rng_init(void)
774{
775 return platform_driver_register(&n2rng_driver);
776}
777
778static void __exit n2rng_exit(void)
779{
780 platform_driver_unregister(&n2rng_driver);
781}
782
783module_init(n2rng_init);
784module_exit(n2rng_exit);
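n2-drv.c above, and nomadik-rng.c and octeon-rng.c below, trade the module_platform_driver() one-liner for the explicit init/exit pair it generates; the macro is only shorthand for that boilerplate. Roughly, simplified from the module_driver() helper in include/linux/platform_device.h:

/* Simplified expansion of module_platform_driver(); the real definition goes
 * through module_driver() but produces the same register/unregister pair. */
#define module_platform_driver(__platform_driver)			\
	static int __init __platform_driver##_init(void)		\
	{								\
		return platform_driver_register(&(__platform_driver));	\
	}								\
	module_init(__platform_driver##_init);				\
	static void __exit __platform_driver##_exit(void)		\
	{								\
		platform_driver_unregister(&(__platform_driver));	\
	}								\
	module_exit(__platform_driver##_exit);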
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 96de0249e59..52e08ca3ccd 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -95,8 +95,6 @@ static struct amba_id nmk_rng_ids[] = {
95 {0, 0}, 95 {0, 0},
96}; 96};
97 97
98MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
99
100static struct amba_driver nmk_rng_driver = { 98static struct amba_driver nmk_rng_driver = {
101 .drv = { 99 .drv = {
102 .owner = THIS_MODULE, 100 .owner = THIS_MODULE,
@@ -107,6 +105,17 @@ static struct amba_driver nmk_rng_driver = {
107 .id_table = nmk_rng_ids, 105 .id_table = nmk_rng_ids,
108}; 106};
109 107
110module_amba_driver(nmk_rng_driver); 108static int __init nmk_rng_init(void)
109{
110 return amba_driver_register(&nmk_rng_driver);
111}
112
113static void __devexit nmk_rng_exit(void)
114{
115 amba_driver_unregister(&nmk_rng_driver);
116}
117
118module_init(nmk_rng_init);
119module_exit(nmk_rng_exit);
111 120
112MODULE_LICENSE("GPL"); 121MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 1eada566ca7..9cd0feca318 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -56,7 +56,7 @@ static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
56 return sizeof(u32); 56 return sizeof(u32);
57} 57}
58 58
59static int octeon_rng_probe(struct platform_device *pdev) 59static int __devinit octeon_rng_probe(struct platform_device *pdev)
60{ 60{
61 struct resource *res_ports; 61 struct resource *res_ports;
62 struct resource *res_result; 62 struct resource *res_result;
@@ -75,35 +75,42 @@ static int octeon_rng_probe(struct platform_device *pdev)
75 75
76 res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0); 76 res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0);
77 if (!res_ports) 77 if (!res_ports)
78 return -ENOENT; 78 goto err_ports;
79 79
80 res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1); 80 res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1);
81 if (!res_result) 81 if (!res_result)
82 return -ENOENT; 82 goto err_ports;
83 83
84 84
85 rng->control_status = devm_ioremap_nocache(&pdev->dev, 85 rng->control_status = devm_ioremap_nocache(&pdev->dev,
86 res_ports->start, 86 res_ports->start,
87 sizeof(u64)); 87 sizeof(u64));
88 if (!rng->control_status) 88 if (!rng->control_status)
89 return -ENOENT; 89 goto err_ports;
90 90
91 rng->result = devm_ioremap_nocache(&pdev->dev, 91 rng->result = devm_ioremap_nocache(&pdev->dev,
92 res_result->start, 92 res_result->start,
93 sizeof(u64)); 93 sizeof(u64));
94 if (!rng->result) 94 if (!rng->result)
95 return -ENOENT; 95 goto err_r;
96 96
97 rng->ops = ops; 97 rng->ops = ops;
98 98
99 dev_set_drvdata(&pdev->dev, &rng->ops); 99 dev_set_drvdata(&pdev->dev, &rng->ops);
100 ret = hwrng_register(&rng->ops); 100 ret = hwrng_register(&rng->ops);
101 if (ret) 101 if (ret)
102 return -ENOENT; 102 goto err;
103 103
104 dev_info(&pdev->dev, "Octeon Random Number Generator\n"); 104 dev_info(&pdev->dev, "Octeon Random Number Generator\n");
105 105
106 return 0; 106 return 0;
107err:
108 devm_iounmap(&pdev->dev, rng->control_status);
109err_r:
110 devm_iounmap(&pdev->dev, rng->result);
111err_ports:
112 devm_kfree(&pdev->dev, rng);
113 return -ENOENT;
107} 114}
108 115
109static int __exit octeon_rng_remove(struct platform_device *pdev) 116static int __exit octeon_rng_remove(struct platform_device *pdev)
@@ -124,7 +131,18 @@ static struct platform_driver octeon_rng_driver = {
124 .remove = __exit_p(octeon_rng_remove), 131 .remove = __exit_p(octeon_rng_remove),
125}; 132};
126 133
127module_platform_driver(octeon_rng_driver); 134static int __init octeon_rng_mod_init(void)
135{
136 return platform_driver_register(&octeon_rng_driver);
137}
138
139static void __exit octeon_rng_mod_exit(void)
140{
141 platform_driver_unregister(&octeon_rng_driver);
142}
143
144module_init(octeon_rng_mod_init);
145module_exit(octeon_rng_mod_exit);
128 146
129MODULE_AUTHOR("David Daney"); 147MODULE_AUTHOR("David Daney");
130MODULE_LICENSE("GPL"); 148MODULE_LICENSE("GPL");
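
The reworked Octeon error path above unwinds its devm_* allocations by hand (devm_iounmap(), devm_kfree()); device-managed resources are in any case released by the driver core when probe fails, so a driver that sticks to devm_* calls can usually bail out with a bare return. A sketch of that pattern under those assumptions; sample_probe and the single MMIO window are illustrative, not the Octeon layout:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int sample_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENOENT;

        /* devm_ mapping: released automatically if probe fails later */
        regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!regs)
                return -ENOMEM;

        return 0;
}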
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d8c54e25376..b757fac3cd1 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -18,12 +18,11 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/random.h> 20#include <linux/random.h>
21#include <linux/clk.h>
21#include <linux/err.h> 22#include <linux/err.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/hw_random.h> 24#include <linux/hw_random.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/slab.h>
26#include <linux/pm_runtime.h>
27 26
28#include <asm/io.h> 27#include <asm/io.h>
29 28
@@ -45,36 +44,26 @@
45#define RNG_SYSSTATUS 0x44 /* System status 44#define RNG_SYSSTATUS 0x44 /* System status
46 [0] = RESETDONE */ 45 [0] = RESETDONE */
47 46
48/** 47static void __iomem *rng_base;
49 * struct omap_rng_private_data - RNG IP block-specific data 48static struct clk *rng_ick;
50 * @base: virtual address of the beginning of the RNG IP block registers 49static struct platform_device *rng_dev;
51 * @mem_res: struct resource * for the IP block registers physical memory
52 */
53struct omap_rng_private_data {
54 void __iomem *base;
55 struct resource *mem_res;
56};
57 50
58static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg) 51static inline u32 omap_rng_read_reg(int reg)
59{ 52{
60 return __raw_readl(priv->base + reg); 53 return __raw_readl(rng_base + reg);
61} 54}
62 55
63static inline void omap_rng_write_reg(struct omap_rng_private_data *priv, 56static inline void omap_rng_write_reg(int reg, u32 val)
64 int reg, u32 val)
65{ 57{
66 __raw_writel(val, priv->base + reg); 58 __raw_writel(val, rng_base + reg);
67} 59}
68 60
69static int omap_rng_data_present(struct hwrng *rng, int wait) 61static int omap_rng_data_present(struct hwrng *rng, int wait)
70{ 62{
71 struct omap_rng_private_data *priv;
72 int data, i; 63 int data, i;
73 64
74 priv = (struct omap_rng_private_data *)rng->priv;
75
76 for (i = 0; i < 20; i++) { 65 for (i = 0; i < 20; i++) {
77 data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1; 66 data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
78 if (data || !wait) 67 if (data || !wait)
79 break; 68 break;
80 /* RNG produces data fast enough (2+ MBit/sec, even 69 /* RNG produces data fast enough (2+ MBit/sec, even
@@ -89,13 +78,9 @@ static int omap_rng_data_present(struct hwrng *rng, int wait)
89 78
90static int omap_rng_data_read(struct hwrng *rng, u32 *data) 79static int omap_rng_data_read(struct hwrng *rng, u32 *data)
91{ 80{
92 struct omap_rng_private_data *priv; 81 *data = omap_rng_read_reg(RNG_OUT_REG);
93
94 priv = (struct omap_rng_private_data *)rng->priv;
95
96 *data = omap_rng_read_reg(priv, RNG_OUT_REG);
97 82
98 return sizeof(u32); 83 return 4;
99} 84}
100 85
101static struct hwrng omap_rng_ops = { 86static struct hwrng omap_rng_ops = {
@@ -104,102 +89,111 @@ static struct hwrng omap_rng_ops = {
104 .data_read = omap_rng_data_read, 89 .data_read = omap_rng_data_read,
105}; 90};
106 91
107static int omap_rng_probe(struct platform_device *pdev) 92static int __devinit omap_rng_probe(struct platform_device *pdev)
108{ 93{
109 struct omap_rng_private_data *priv; 94 struct resource *res;
110 int ret; 95 int ret;
111 96
112 priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL); 97 /*
113 if (!priv) { 98 * A bit ugly, and it will never actually happen but there can
114 dev_err(&pdev->dev, "could not allocate memory\n"); 99 * be only one RNG and this catches any bork
115 return -ENOMEM; 100 */
116 }; 101 if (rng_dev)
102 return -EBUSY;
103
104 if (cpu_is_omap24xx()) {
105 rng_ick = clk_get(&pdev->dev, "ick");
106 if (IS_ERR(rng_ick)) {
107 dev_err(&pdev->dev, "Could not get rng_ick\n");
108 ret = PTR_ERR(rng_ick);
109 return ret;
110 } else
111 clk_enable(rng_ick);
112 }
117 113
118 omap_rng_ops.priv = (unsigned long)priv; 114 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 dev_set_drvdata(&pdev->dev, priv);
120 115
121 priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 116 if (!res) {
122 if (!priv->mem_res) {
123 ret = -ENOENT; 117 ret = -ENOENT;
124 goto err_ioremap; 118 goto err_region;
125 } 119 }
126 120
127 priv->base = devm_request_and_ioremap(&pdev->dev, priv->mem_res); 121 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
128 if (!priv->base) { 122 ret = -EBUSY;
123 goto err_region;
124 }
125
126 dev_set_drvdata(&pdev->dev, res);
127 rng_base = ioremap(res->start, resource_size(res));
128 if (!rng_base) {
129 ret = -ENOMEM; 129 ret = -ENOMEM;
130 goto err_ioremap; 130 goto err_ioremap;
131 } 131 }
132 dev_set_drvdata(&pdev->dev, priv);
133
134 pm_runtime_enable(&pdev->dev);
135 pm_runtime_get_sync(&pdev->dev);
136 132
137 ret = hwrng_register(&omap_rng_ops); 133 ret = hwrng_register(&omap_rng_ops);
138 if (ret) 134 if (ret)
139 goto err_register; 135 goto err_register;
140 136
141 dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n", 137 dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
142 omap_rng_read_reg(priv, RNG_REV_REG)); 138 omap_rng_read_reg(RNG_REV_REG));
139 omap_rng_write_reg(RNG_MASK_REG, 0x1);
143 140
144 omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); 141 rng_dev = pdev;
145 142
146 return 0; 143 return 0;
147 144
148err_register: 145err_register:
149 priv->base = NULL; 146 iounmap(rng_base);
150 pm_runtime_disable(&pdev->dev); 147 rng_base = NULL;
151err_ioremap: 148err_ioremap:
152 kfree(priv); 149 release_mem_region(res->start, resource_size(res));
153 150err_region:
151 if (cpu_is_omap24xx()) {
152 clk_disable(rng_ick);
153 clk_put(rng_ick);
154 }
154 return ret; 155 return ret;
155} 156}
156 157
157static int __exit omap_rng_remove(struct platform_device *pdev) 158static int __exit omap_rng_remove(struct platform_device *pdev)
158{ 159{
159 struct omap_rng_private_data *priv = dev_get_drvdata(&pdev->dev); 160 struct resource *res = dev_get_drvdata(&pdev->dev);
160 161
161 hwrng_unregister(&omap_rng_ops); 162 hwrng_unregister(&omap_rng_ops);
162 163
163 omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); 164 omap_rng_write_reg(RNG_MASK_REG, 0x0);
164 165
165 pm_runtime_put_sync(&pdev->dev); 166 iounmap(rng_base);
166 pm_runtime_disable(&pdev->dev);
167 167
168 release_mem_region(priv->mem_res->start, resource_size(priv->mem_res)); 168 if (cpu_is_omap24xx()) {
169 clk_disable(rng_ick);
170 clk_put(rng_ick);
171 }
169 172
170 kfree(priv); 173 release_mem_region(res->start, resource_size(res));
174 rng_base = NULL;
171 175
172 return 0; 176 return 0;
173} 177}
174 178
175#ifdef CONFIG_PM_SLEEP 179#ifdef CONFIG_PM
176 180
177static int omap_rng_suspend(struct device *dev) 181static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
178{ 182{
179 struct omap_rng_private_data *priv = dev_get_drvdata(dev); 183 omap_rng_write_reg(RNG_MASK_REG, 0x0);
180
181 omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
182 pm_runtime_put_sync(dev);
183
184 return 0; 184 return 0;
185} 185}
186 186
187static int omap_rng_resume(struct device *dev) 187static int omap_rng_resume(struct platform_device *pdev)
188{ 188{
189 struct omap_rng_private_data *priv = dev_get_drvdata(dev); 189 omap_rng_write_reg(RNG_MASK_REG, 0x1);
190
191 pm_runtime_get_sync(dev);
192 omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
193
194 return 0; 190 return 0;
195} 191}
196 192
197static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
198#define OMAP_RNG_PM (&omap_rng_pm)
199
200#else 193#else
201 194
202#define OMAP_RNG_PM NULL 195#define omap_rng_suspend NULL
196#define omap_rng_resume NULL
203 197
204#endif 198#endif
205 199
@@ -210,14 +204,18 @@ static struct platform_driver omap_rng_driver = {
210 .driver = { 204 .driver = {
211 .name = "omap_rng", 205 .name = "omap_rng",
212 .owner = THIS_MODULE, 206 .owner = THIS_MODULE,
213 .pm = OMAP_RNG_PM,
214 }, 207 },
215 .probe = omap_rng_probe, 208 .probe = omap_rng_probe,
216 .remove = __exit_p(omap_rng_remove), 209 .remove = __exit_p(omap_rng_remove),
210 .suspend = omap_rng_suspend,
211 .resume = omap_rng_resume
217}; 212};
218 213
219static int __init omap_rng_init(void) 214static int __init omap_rng_init(void)
220{ 215{
216 if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
217 return -ENODEV;
218
221 return platform_driver_register(&omap_rng_driver); 219 return platform_driver_register(&omap_rng_driver);
222} 220}
223 221
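
The omap-rng hunks above also swap the dev_pm_ops wiring (SIMPLE_DEV_PM_OPS plus .driver.pm) for the legacy .suspend/.resume members of struct platform_driver, presumably because the target tree predates the dev_pm_ops form in this driver. A sketch of the dev_pm_ops style kept in the left-hand column; mydrv_* names are placeholders:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int mydrv_suspend(struct device *dev)
{
        /* mask the hardware, drop clocks, etc. */
        return 0;
}

static int mydrv_resume(struct device *dev)
{
        /* re-enable the hardware */
        return 0;
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

static struct platform_driver mydrv_driver = {
        .driver = {
                .name = "mydrv",
                .pm   = &mydrv_pm_ops,  /* replaces legacy .suspend/.resume */
        },
};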
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c6df5b29af0..1d504815e6d 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -94,7 +94,7 @@ static struct hwrng pasemi_rng = {
94 .data_read = pasemi_rng_data_read, 94 .data_read = pasemi_rng_data_read,
95}; 95};
96 96
97static int rng_probe(struct platform_device *ofdev) 97static int __devinit rng_probe(struct platform_device *ofdev)
98{ 98{
99 void __iomem *rng_regs; 99 void __iomem *rng_regs;
100 struct device_node *rng_np = ofdev->dev.of_node; 100 struct device_node *rng_np = ofdev->dev.of_node;
@@ -122,7 +122,7 @@ static int rng_probe(struct platform_device *ofdev)
122 return err; 122 return err;
123} 123}
124 124
125static int rng_remove(struct platform_device *dev) 125static int __devexit rng_remove(struct platform_device *dev)
126{ 126{
127 void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv; 127 void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
128 128
@@ -148,7 +148,17 @@ static struct platform_driver rng_driver = {
148 .remove = rng_remove, 148 .remove = rng_remove,
149}; 149};
150 150
151module_platform_driver(rng_driver); 151static int __init rng_init(void)
152{
153 return platform_driver_register(&rng_driver);
154}
155module_init(rng_init);
156
157static void __exit rng_exit(void)
158{
159 platform_driver_unregister(&rng_driver);
160}
161module_exit(rng_exit);
152 162
153MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
154MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); 164MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 973b95113ed..990d55a5e3e 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -151,7 +151,7 @@ err_enable:
151 return ret; 151 return ret;
152} 152}
153 153
154static int picoxcell_trng_remove(struct platform_device *pdev) 154static int __devexit picoxcell_trng_remove(struct platform_device *pdev)
155{ 155{
156 hwrng_unregister(&picoxcell_trng); 156 hwrng_unregister(&picoxcell_trng);
157 clk_disable(rng_clk); 157 clk_disable(rng_clk);
@@ -181,7 +181,7 @@ static const struct dev_pm_ops picoxcell_trng_pm_ops = {
181 181
182static struct platform_driver picoxcell_trng_driver = { 182static struct platform_driver picoxcell_trng_driver = {
183 .probe = picoxcell_trng_probe, 183 .probe = picoxcell_trng_probe,
184 .remove = picoxcell_trng_remove, 184 .remove = __devexit_p(picoxcell_trng_remove),
185 .driver = { 185 .driver = {
186 .name = "picoxcell-trng", 186 .name = "picoxcell-trng",
187 .owner = THIS_MODULE, 187 .owner = THIS_MODULE,
@@ -191,7 +191,17 @@ static struct platform_driver picoxcell_trng_driver = {
191 }, 191 },
192}; 192};
193 193
194module_platform_driver(picoxcell_trng_driver); 194static int __init picoxcell_trng_init(void)
195{
196 return platform_driver_register(&picoxcell_trng_driver);
197}
198module_init(picoxcell_trng_init);
199
200static void __exit picoxcell_trng_exit(void)
201{
202 platform_driver_unregister(&picoxcell_trng_driver);
203}
204module_exit(picoxcell_trng_exit);
195 205
196MODULE_LICENSE("GPL"); 206MODULE_LICENSE("GPL");
197MODULE_AUTHOR("Jamie Iles"); 207MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index 732c330805f..b8afa6a4ff6 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -90,7 +90,7 @@ static struct hwrng ppc4xx_rng = {
90 .data_read = ppc4xx_rng_data_read, 90 .data_read = ppc4xx_rng_data_read,
91}; 91};
92 92
93static int ppc4xx_rng_probe(struct platform_device *dev) 93static int __devinit ppc4xx_rng_probe(struct platform_device *dev)
94{ 94{
95 void __iomem *rng_regs; 95 void __iomem *rng_regs;
96 int err = 0; 96 int err = 0;
@@ -111,7 +111,7 @@ static int ppc4xx_rng_probe(struct platform_device *dev)
111 return err; 111 return err;
112} 112}
113 113
114static int ppc4xx_rng_remove(struct platform_device *dev) 114static int __devexit ppc4xx_rng_remove(struct platform_device *dev)
115{ 115{
116 void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv; 116 void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
117 117
@@ -139,7 +139,17 @@ static struct platform_driver ppc4xx_rng_driver = {
139 .remove = ppc4xx_rng_remove, 139 .remove = ppc4xx_rng_remove,
140}; 140};
141 141
142module_platform_driver(ppc4xx_rng_driver); 142static int __init ppc4xx_rng_init(void)
143{
144 return platform_driver_register(&ppc4xx_rng_driver);
145}
146module_init(ppc4xx_rng_init);
147
148static void __exit ppc4xx_rng_exit(void)
149{
150 platform_driver_unregister(&ppc4xx_rng_driver);
151}
152module_exit(ppc4xx_rng_exit);
143 153
144MODULE_LICENSE("GPL"); 154MODULE_LICENSE("GPL");
145MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>"); 155MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
deleted file mode 100644
index 5f1197929f0..00000000000
--- a/drivers/char/hw_random/pseries-rng.c
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * Copyright (C) 2010 Michael Neuling IBM Corporation
3 *
4 * Driver for the pseries hardware RNG for POWER7+ and above
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/hw_random.h>
22#include <asm/vio.h>
23
24#define MODULE_NAME "pseries-rng"
25
26static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
27{
28 if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
29 printk(KERN_ERR "pseries rng hcall error\n");
30 return 0;
31 }
32 return 8;
33}
34
35/**
36 * pseries_rng_get_desired_dma - Return desired DMA allocate for CMO operations
37 *
38 * This is a required function for a driver to operate in a CMO environment
39 * but this device does not make use of DMA allocations, return 0.
40 *
41 * Return value:
42 * Number of bytes of IO data the driver will need to perform well -> 0
43 */
44static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
45{
46 return 0;
47};
48
49static struct hwrng pseries_rng = {
50 .name = MODULE_NAME,
51 .data_read = pseries_rng_data_read,
52};
53
54static int __init pseries_rng_probe(struct vio_dev *dev,
55 const struct vio_device_id *id)
56{
57 return hwrng_register(&pseries_rng);
58}
59
60static int __exit pseries_rng_remove(struct vio_dev *dev)
61{
62 hwrng_unregister(&pseries_rng);
63 return 0;
64}
65
66static struct vio_device_id pseries_rng_driver_ids[] = {
67 { "ibm,random-v1", "ibm,random"},
68 { "", "" }
69};
70MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
71
72static struct vio_driver pseries_rng_driver = {
73 .name = MODULE_NAME,
74 .probe = pseries_rng_probe,
75 .remove = pseries_rng_remove,
76 .get_desired_dma = pseries_rng_get_desired_dma,
77 .id_table = pseries_rng_driver_ids
78};
79
80static int __init rng_init(void)
81{
82 printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
83 return vio_register_driver(&pseries_rng_driver);
84}
85
86module_init(rng_init);
87
88static void __exit rng_exit(void)
89{
90 vio_unregister_driver(&pseries_rng_driver);
91}
92module_exit(rng_exit);
93
94MODULE_LICENSE("GPL");
95MODULE_AUTHOR("Michael Neuling <mikey@neuling.org>");
96MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
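
pseries-rng.c, deleted above (presumably because the backport target predates it), is a compact example of the hwrng contract every driver in this directory implements: fill in a struct hwrng, provide data_read() (or read()), and register it. A stripped-down sketch of that contract, with demo_* placeholder names and a fake constant instead of real hardware access:

#include <linux/module.h>
#include <linux/hw_random.h>

static int demo_rng_data_read(struct hwrng *rng, u32 *data)
{
        *data = 0x12345678;     /* a real driver reads its RNG register */
        return sizeof(u32);     /* bytes of entropy written to *data */
}

static struct hwrng demo_rng = {
        .name      = "demo-rng",
        .data_read = demo_rng_data_read,
};

static int __init demo_rng_init(void)
{
        return hwrng_register(&demo_rng);
}
module_init(demo_rng_init);

static void __exit demo_rng_exit(void)
{
        hwrng_unregister(&demo_rng);
}
module_exit(demo_rng_exit);

MODULE_LICENSE("GPL");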
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 849db199c02..a8428e6f64a 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,7 +88,7 @@ static struct hwrng timeriomem_rng_ops = {
88 .priv = 0, 88 .priv = 0,
89}; 89};
90 90
91static int timeriomem_rng_probe(struct platform_device *pdev) 91static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
92{ 92{
93 struct resource *res; 93 struct resource *res;
94 int ret; 94 int ret;
@@ -130,7 +130,7 @@ failed:
130 return ret; 130 return ret;
131} 131}
132 132
133static int timeriomem_rng_remove(struct platform_device *pdev) 133static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
134{ 134{
135 del_timer_sync(&timeriomem_rng_timer); 135 del_timer_sync(&timeriomem_rng_timer);
136 hwrng_unregister(&timeriomem_rng_ops); 136 hwrng_unregister(&timeriomem_rng_ops);
@@ -146,10 +146,21 @@ static struct platform_driver timeriomem_rng_driver = {
146 .owner = THIS_MODULE, 146 .owner = THIS_MODULE,
147 }, 147 },
148 .probe = timeriomem_rng_probe, 148 .probe = timeriomem_rng_probe,
149 .remove = timeriomem_rng_remove, 149 .remove = __devexit_p(timeriomem_rng_remove),
150}; 150};
151 151
152module_platform_driver(timeriomem_rng_driver); 152static int __init timeriomem_rng_init(void)
153{
154 return platform_driver_register(&timeriomem_rng_driver);
155}
156
157static void __exit timeriomem_rng_exit(void)
158{
159 platform_driver_unregister(&timeriomem_rng_driver);
160}
161
162module_init(timeriomem_rng_init);
163module_exit(timeriomem_rng_exit);
153 164
154MODULE_LICENSE("GPL"); 165MODULE_LICENSE("GPL");
155MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); 166MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
diff --git a/drivers/char/hw_random/tpm-rng.c b/drivers/char/hw_random/tpm-rng.c
deleted file mode 100644
index d6d448266f0..00000000000
--- a/drivers/char/hw_random/tpm-rng.c
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Copyright (C) 2012 Kent Yoder IBM Corporation
3 *
4 * HWRNG interfaces to pull RNG data from a TPM
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/hw_random.h>
22#include <linux/tpm.h>
23
24#define MODULE_NAME "tpm-rng"
25
26static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
27{
28 return tpm_get_random(TPM_ANY_NUM, data, max);
29}
30
31static struct hwrng tpm_rng = {
32 .name = MODULE_NAME,
33 .read = tpm_rng_read,
34};
35
36static int __init rng_init(void)
37{
38 return hwrng_register(&tpm_rng);
39}
40module_init(rng_init);
41
42static void __exit rng_exit(void)
43{
44 hwrng_unregister(&tpm_rng);
45}
46module_exit(rng_exit);
47
48MODULE_LICENSE("GPL v2");
49MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>");
50MODULE_DESCRIPTION("RNG driver for TPM devices");
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index de473ef3882..0bc0cb70210 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -115,7 +115,10 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
115 rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); 115 rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
116 if (!rngdev) 116 if (!rngdev)
117 return -ENOMEM; 117 return -ENOMEM;
118 rngdev->base = devm_request_and_ioremap(&dev->dev, r); 118 if (!devm_request_mem_region(&dev->dev, r->start, resource_size(r),
119 dev_name(&dev->dev)))
120 return -EBUSY;
121 rngdev->base = devm_ioremap(&dev->dev, r->start, resource_size(r));
119 if (!rngdev->base) 122 if (!rngdev->base)
120 return -EBUSY; 123 return -EBUSY;
121 124
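
The tx4939 hunk above open-codes what devm_request_and_ioremap() did in one call: claim the MMIO window, then map it, both device-managed. A self-contained sketch of the two-step form, with a placeholder probe function:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int mmio_probe(struct platform_device *pdev)
{
        struct resource *r;
        void __iomem *base;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r)
                return -ENODEV;

        if (!devm_request_mem_region(&pdev->dev, r->start,
                                     resource_size(r),
                                     dev_name(&pdev->dev)))
                return -EBUSY;  /* region already claimed by someone else */

        base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (!base)
                return -EBUSY;

        return 0;
}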
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index b65c1039595..75f1cbd61c1 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -23,7 +23,6 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/virtio.h> 24#include <linux/virtio.h>
25#include <linux/virtio_rng.h> 25#include <linux/virtio_rng.h>
26#include <linux/module.h>
27 26
28static struct virtqueue *vq; 27static struct virtqueue *vq;
29static unsigned int data_avail; 28static unsigned int data_avail;
@@ -47,7 +46,7 @@ static void register_buffer(u8 *buf, size_t size)
47 sg_init_one(&sg, buf, size); 46 sg_init_one(&sg, buf, size);
48 47
49 /* There should always be room for one buffer. */ 48 /* There should always be room for one buffer. */
50 if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0) 49 if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
51 BUG(); 50 BUG();
52 51
53 virtqueue_kick(vq); 52 virtqueue_kick(vq);
@@ -55,7 +54,6 @@ static void register_buffer(u8 *buf, size_t size)
55 54
56static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) 55static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
57{ 56{
58 int ret;
59 57
60 if (!busy) { 58 if (!busy) {
61 busy = true; 59 busy = true;
@@ -66,9 +64,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
66 if (!wait) 64 if (!wait)
67 return 0; 65 return 0;
68 66
69 ret = wait_for_completion_killable(&have_data); 67 wait_for_completion(&have_data);
70 if (ret < 0)
71 return ret;
72 68
73 busy = false; 69 busy = false;
74 70
@@ -88,7 +84,7 @@ static struct hwrng virtio_hwrng = {
88 .read = virtio_read, 84 .read = virtio_read,
89}; 85};
90 86
91static int probe_common(struct virtio_device *vdev) 87static int virtrng_probe(struct virtio_device *vdev)
92{ 88{
93 int err; 89 int err;
94 90
@@ -106,37 +102,13 @@ static int probe_common(struct virtio_device *vdev)
106 return 0; 102 return 0;
107} 103}
108 104
109static void remove_common(struct virtio_device *vdev) 105static void __devexit virtrng_remove(struct virtio_device *vdev)
110{ 106{
111 vdev->config->reset(vdev); 107 vdev->config->reset(vdev);
112 busy = false;
113 hwrng_unregister(&virtio_hwrng); 108 hwrng_unregister(&virtio_hwrng);
114 vdev->config->del_vqs(vdev); 109 vdev->config->del_vqs(vdev);
115} 110}
116 111
117static int virtrng_probe(struct virtio_device *vdev)
118{
119 return probe_common(vdev);
120}
121
122static void virtrng_remove(struct virtio_device *vdev)
123{
124 remove_common(vdev);
125}
126
127#ifdef CONFIG_PM
128static int virtrng_freeze(struct virtio_device *vdev)
129{
130 remove_common(vdev);
131 return 0;
132}
133
134static int virtrng_restore(struct virtio_device *vdev)
135{
136 return probe_common(vdev);
137}
138#endif
139
140static struct virtio_device_id id_table[] = { 112static struct virtio_device_id id_table[] = {
141 { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, 113 { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
142 { 0 }, 114 { 0 },
@@ -147,11 +119,7 @@ static struct virtio_driver virtio_rng_driver = {
147 .driver.owner = THIS_MODULE, 119 .driver.owner = THIS_MODULE,
148 .id_table = id_table, 120 .id_table = id_table,
149 .probe = virtrng_probe, 121 .probe = virtrng_probe,
150 .remove = virtrng_remove, 122 .remove = __devexit_p(virtrng_remove),
151#ifdef CONFIG_PM
152 .freeze = virtrng_freeze,
153 .restore = virtrng_restore,
154#endif
155}; 123};
156 124
157static int __init init(void) 125static int __init init(void)
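
One of the virtio-rng changes above replaces wait_for_completion_killable() with an uninterruptible wait_for_completion(), so a reader blocked on the RNG can no longer be killed while it waits. A sketch of the killable form and its return-value check, using a placeholder completion rather than the driver's own:

#include <linux/completion.h>

static DECLARE_COMPLETION(data_ready);

static int wait_for_data(void)
{
        int ret;

        ret = wait_for_completion_killable(&data_ready);
        if (ret < 0)            /* woken by a fatal signal, not by data */
                return ret;     /* typically -ERESTARTSYS */

        return 0;
}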
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 40cc0cf2ded..6e40072fbf6 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -69,19 +69,19 @@ MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
69MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops"); 69MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
70MODULE_LICENSE("GPL"); 70MODULE_LICENSE("GPL");
71 71
72static bool force; 72static int force;
73module_param(force, bool, 0); 73module_param(force, bool, 0);
74MODULE_PARM_DESC(force, "Force loading without checking for supported models"); 74MODULE_PARM_DESC(force, "Force loading without checking for supported models");
75 75
76static bool ignore_dmi; 76static int ignore_dmi;
77module_param(ignore_dmi, bool, 0); 77module_param(ignore_dmi, bool, 0);
78MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); 78MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
79 79
80static bool restricted; 80static int restricted;
81module_param(restricted, bool, 0); 81module_param(restricted, bool, 0);
82MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); 82MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
83 83
84static bool power_status; 84static int power_status;
85module_param(power_status, bool, 0600); 85module_param(power_status, bool, 0600);
86MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); 86MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
87 87
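
The i8k hunk turns four bool module parameters back into ints; newer kernels expect the variable behind module_param(..., bool, ...) to really be a C bool, while the older target tree still used int here. A minimal sketch of the bool form kept in the left-hand column; my_force is a placeholder, not an i8k symbol:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool my_force;           /* backing variable matches the param type */
module_param(my_force, bool, 0);
MODULE_PARM_DESC(my_force, "Force loading without model checks");
MODULE_LICENSE("GPL");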
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index cdd4c09fda9..3ed20e8abc0 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -560,7 +560,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
560 BT_CONTROL(BT_H_BUSY); /* set */ 560 BT_CONTROL(BT_H_BUSY); /* set */
561 561
562 /* 562 /*
563 * Uncached, ordered writes should just proceed serially but 563 * Uncached, ordered writes should just proceed serially but
564 * some BMCs don't clear B2H_ATN with one hit. Fast-path a 564 * some BMCs don't clear B2H_ATN with one hit. Fast-path a
565 * workaround without too much penalty to the general case. 565 * workaround without too much penalty to the general case.
566 */ 566 */
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 9eb360ff8ca..2aa3977aae5 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <asm/system.h>
37#include <linux/poll.h> 38#include <linux/poll.h>
38#include <linux/sched.h> 39#include <linux/sched.h>
39#include <linux/spinlock.h> 40#include <linux/spinlock.h>
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index e53fc24c6af..cf82fedae09 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
118#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH 118#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
119 119
120/* Timeouts in microseconds. */ 120/* Timeouts in microseconds. */
121#define IBF_RETRY_TIMEOUT 5000000 121#define IBF_RETRY_TIMEOUT 1000000
122#define OBF_RETRY_TIMEOUT 5000000 122#define OBF_RETRY_TIMEOUT 1000000
123#define MAX_ERROR_RETRIES 10 123#define MAX_ERROR_RETRIES 10
124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ) 124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
125 125
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 053201b062a..58c0e6387cf 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <asm/system.h>
36#include <linux/poll.h> 37#include <linux/poll.h>
37#include <linux/sched.h> 38#include <linux/sched.h>
38#include <linux/seq_file.h> 39#include <linux/seq_file.h>
@@ -45,7 +46,6 @@
45#include <linux/init.h> 46#include <linux/init.h>
46#include <linux/proc_fs.h> 47#include <linux/proc_fs.h>
47#include <linux/rcupdate.h> 48#include <linux/rcupdate.h>
48#include <linux/interrupt.h>
49 49
50#define PFX "IPMI message handler: " 50#define PFX "IPMI message handler: "
51 51
@@ -53,8 +53,6 @@
53 53
54static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 54static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
55static int ipmi_init_msghandler(void); 55static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf);
58 56
59static int initialized; 57static int initialized;
60 58
@@ -357,15 +355,12 @@ struct ipmi_smi {
357 int curr_seq; 355 int curr_seq;
358 356
359 /* 357 /*
360 * Messages queued for delivery. If delivery fails (out of memory 358 * Messages that were delayed for some reason (out of memory,
361 * for instance), They will stay in here to be processed later in a 359 * for instance), will go in here to be processed later in a
362 * periodic timer interrupt. The tasklet is for handling received 360 * periodic timer interrupt.
363 * messages directly from the handler.
364 */ 361 */
365 spinlock_t waiting_msgs_lock; 362 spinlock_t waiting_msgs_lock;
366 struct list_head waiting_msgs; 363 struct list_head waiting_msgs;
367 atomic_t watchdog_pretimeouts_to_deliver;
368 struct tasklet_struct recv_tasklet;
369 364
370 /* 365 /*
371 * The list of command receivers that are registered for commands 366 * The list of command receivers that are registered for commands
@@ -498,8 +493,6 @@ static void clean_up_interface_data(ipmi_smi_t intf)
498 struct cmd_rcvr *rcvr, *rcvr2; 493 struct cmd_rcvr *rcvr, *rcvr2;
499 struct list_head list; 494 struct list_head list;
500 495
501 tasklet_kill(&intf->recv_tasklet);
502
503 free_smi_msg_list(&intf->waiting_msgs); 496 free_smi_msg_list(&intf->waiting_msgs);
504 free_recv_msg_list(&intf->waiting_events); 497 free_recv_msg_list(&intf->waiting_events);
505 498
@@ -1880,7 +1873,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1880 struct ipmi_recv_msg *supplied_recv, 1873 struct ipmi_recv_msg *supplied_recv,
1881 int priority) 1874 int priority)
1882{ 1875{
1883 unsigned char saddr = 0, lun = 0; 1876 unsigned char saddr, lun;
1884 int rv; 1877 int rv;
1885 1878
1886 if (!user) 1879 if (!user)
@@ -2793,17 +2786,12 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2793 return; 2786 return;
2794} 2787}
2795 2788
2796static void ipmi_poll(ipmi_smi_t intf) 2789void ipmi_poll_interface(ipmi_user_t user)
2797{ 2790{
2791 ipmi_smi_t intf = user->intf;
2792
2798 if (intf->handlers->poll) 2793 if (intf->handlers->poll)
2799 intf->handlers->poll(intf->send_info); 2794 intf->handlers->poll(intf->send_info);
2800 /* In case something came in */
2801 handle_new_recv_msgs(intf);
2802}
2803
2804void ipmi_poll_interface(ipmi_user_t user)
2805{
2806 ipmi_poll(user->intf);
2807} 2795}
2808EXPORT_SYMBOL(ipmi_poll_interface); 2796EXPORT_SYMBOL(ipmi_poll_interface);
2809 2797
@@ -2872,10 +2860,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2872#endif 2860#endif
2873 spin_lock_init(&intf->waiting_msgs_lock); 2861 spin_lock_init(&intf->waiting_msgs_lock);
2874 INIT_LIST_HEAD(&intf->waiting_msgs); 2862 INIT_LIST_HEAD(&intf->waiting_msgs);
2875 tasklet_init(&intf->recv_tasklet,
2876 smi_recv_tasklet,
2877 (unsigned long) intf);
2878 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2879 spin_lock_init(&intf->events_lock); 2863 spin_lock_init(&intf->events_lock);
2880 INIT_LIST_HEAD(&intf->waiting_events); 2864 INIT_LIST_HEAD(&intf->waiting_events);
2881 intf->waiting_events_count = 0; 2865 intf->waiting_events_count = 0;
@@ -3638,11 +3622,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
3638} 3622}
3639 3623
3640/* 3624/*
3641 * Handle a received message. Return 1 if the message should be requeued, 3625 * Handle a new message. Return 1 if the message should be requeued,
3642 * 0 if the message should be freed, or -1 if the message should not 3626 * 0 if the message should be freed, or -1 if the message should not
3643 * be freed or requeued. 3627 * be freed or requeued.
3644 */ 3628 */
3645static int handle_one_recv_msg(ipmi_smi_t intf, 3629static int handle_new_recv_msg(ipmi_smi_t intf,
3646 struct ipmi_smi_msg *msg) 3630 struct ipmi_smi_msg *msg)
3647{ 3631{
3648 int requeue; 3632 int requeue;
@@ -3789,7 +3773,7 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
3789 3773
3790 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3774 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3791 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 3775 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
3792 /* It's an asynchronous event. */ 3776 /* It's an asynchronous event. */
3793 requeue = handle_read_event_rsp(intf, msg); 3777 requeue = handle_read_event_rsp(intf, msg);
3794 } else { 3778 } else {
3795 /* It's a response from the local BMC. */ 3779 /* It's a response from the local BMC. */
@@ -3800,72 +3784,12 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
3800 return requeue; 3784 return requeue;
3801} 3785}
3802 3786
3803/*
3804 * If there are messages in the queue or pretimeouts, handle them.
3805 */
3806static void handle_new_recv_msgs(ipmi_smi_t intf)
3807{
3808 struct ipmi_smi_msg *smi_msg;
3809 unsigned long flags = 0;
3810 int rv;
3811 int run_to_completion = intf->run_to_completion;
3812
3813 /* See if any waiting messages need to be processed. */
3814 if (!run_to_completion)
3815 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3816 while (!list_empty(&intf->waiting_msgs)) {
3817 smi_msg = list_entry(intf->waiting_msgs.next,
3818 struct ipmi_smi_msg, link);
3819 list_del(&smi_msg->link);
3820 if (!run_to_completion)
3821 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3822 rv = handle_one_recv_msg(intf, smi_msg);
3823 if (!run_to_completion)
3824 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3825 if (rv == 0) {
3826 /* Message handled */
3827 ipmi_free_smi_msg(smi_msg);
3828 } else if (rv < 0) {
3829 /* Fatal error on the message, del but don't free. */
3830 } else {
3831 /*
3832 * To preserve message order, quit if we
3833 * can't handle a message.
3834 */
3835 list_add(&smi_msg->link, &intf->waiting_msgs);
3836 break;
3837 }
3838 }
3839 if (!run_to_completion)
3840 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3841
3842 /*
3843 * If the pretimout count is non-zero, decrement one from it and
3844 * deliver pretimeouts to all the users.
3845 */
3846 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
3847 ipmi_user_t user;
3848
3849 rcu_read_lock();
3850 list_for_each_entry_rcu(user, &intf->users, link) {
3851 if (user->handler->ipmi_watchdog_pretimeout)
3852 user->handler->ipmi_watchdog_pretimeout(
3853 user->handler_data);
3854 }
3855 rcu_read_unlock();
3856 }
3857}
3858
3859static void smi_recv_tasklet(unsigned long val)
3860{
3861 handle_new_recv_msgs((ipmi_smi_t) val);
3862}
3863
3864/* Handle a new message from the lower layer. */ 3787/* Handle a new message from the lower layer. */
3865void ipmi_smi_msg_received(ipmi_smi_t intf, 3788void ipmi_smi_msg_received(ipmi_smi_t intf,
3866 struct ipmi_smi_msg *msg) 3789 struct ipmi_smi_msg *msg)
3867{ 3790{
3868 unsigned long flags = 0; /* keep us warning-free. */ 3791 unsigned long flags = 0; /* keep us warning-free. */
3792 int rv;
3869 int run_to_completion; 3793 int run_to_completion;
3870 3794
3871 3795
@@ -3919,11 +3843,31 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3919 run_to_completion = intf->run_to_completion; 3843 run_to_completion = intf->run_to_completion;
3920 if (!run_to_completion) 3844 if (!run_to_completion)
3921 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3845 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3922 list_add_tail(&msg->link, &intf->waiting_msgs); 3846 if (!list_empty(&intf->waiting_msgs)) {
3847 list_add_tail(&msg->link, &intf->waiting_msgs);
3848 if (!run_to_completion)
3849 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3850 goto out;
3851 }
3923 if (!run_to_completion) 3852 if (!run_to_completion)
3924 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3853 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3925 3854
3926 tasklet_schedule(&intf->recv_tasklet); 3855 rv = handle_new_recv_msg(intf, msg);
3856 if (rv > 0) {
3857 /*
3858 * Could not handle the message now, just add it to a
3859 * list to handle later.
3860 */
3861 run_to_completion = intf->run_to_completion;
3862 if (!run_to_completion)
3863 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3864 list_add_tail(&msg->link, &intf->waiting_msgs);
3865 if (!run_to_completion)
3866 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3867 } else if (rv == 0) {
3868 ipmi_free_smi_msg(msg);
3869 }
3870
3927 out: 3871 out:
3928 return; 3872 return;
3929} 3873}
@@ -3931,8 +3875,16 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
3931 3875
3932void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3876void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3933{ 3877{
3934 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 3878 ipmi_user_t user;
3935 tasklet_schedule(&intf->recv_tasklet); 3879
3880 rcu_read_lock();
3881 list_for_each_entry_rcu(user, &intf->users, link) {
3882 if (!user->handler->ipmi_watchdog_pretimeout)
3883 continue;
3884
3885 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3886 }
3887 rcu_read_unlock();
3936} 3888}
3937EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 3889EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3938 3890
@@ -4046,12 +3998,28 @@ static void ipmi_timeout_handler(long timeout_period)
4046 ipmi_smi_t intf; 3998 ipmi_smi_t intf;
4047 struct list_head timeouts; 3999 struct list_head timeouts;
4048 struct ipmi_recv_msg *msg, *msg2; 4000 struct ipmi_recv_msg *msg, *msg2;
4001 struct ipmi_smi_msg *smi_msg, *smi_msg2;
4049 unsigned long flags; 4002 unsigned long flags;
4050 int i; 4003 int i;
4051 4004
4052 rcu_read_lock(); 4005 rcu_read_lock();
4053 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4006 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4054 tasklet_schedule(&intf->recv_tasklet); 4007 /* See if any waiting messages need to be processed. */
4008 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
4009 list_for_each_entry_safe(smi_msg, smi_msg2,
4010 &intf->waiting_msgs, link) {
4011 if (!handle_new_recv_msg(intf, smi_msg)) {
4012 list_del(&smi_msg->link);
4013 ipmi_free_smi_msg(smi_msg);
4014 } else {
4015 /*
4016 * To preserve message order, quit if we
4017 * can't handle a message.
4018 */
4019 break;
4020 }
4021 }
4022 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
4055 4023
4056 /* 4024 /*
4057 * Go through the seq table and find any messages that 4025 * Go through the seq table and find any messages that
@@ -4205,48 +4173,12 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
4205 4173
4206#ifdef CONFIG_IPMI_PANIC_EVENT 4174#ifdef CONFIG_IPMI_PANIC_EVENT
4207 4175
4208static atomic_t panic_done_count = ATOMIC_INIT(0);
4209
4210static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 4176static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4211{ 4177{
4212 atomic_dec(&panic_done_count);
4213} 4178}
4214 4179
4215static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 4180static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4216{ 4181{
4217 atomic_dec(&panic_done_count);
4218}
4219
4220/*
4221 * Inside a panic, send a message and wait for a response.
4222 */
4223static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
4224 struct ipmi_addr *addr,
4225 struct kernel_ipmi_msg *msg)
4226{
4227 struct ipmi_smi_msg smi_msg;
4228 struct ipmi_recv_msg recv_msg;
4229 int rv;
4230
4231 smi_msg.done = dummy_smi_done_handler;
4232 recv_msg.done = dummy_recv_done_handler;
4233 atomic_add(2, &panic_done_count);
4234 rv = i_ipmi_request(NULL,
4235 intf,
4236 addr,
4237 0,
4238 msg,
4239 intf,
4240 &smi_msg,
4241 &recv_msg,
4242 0,
4243 intf->channels[0].address,
4244 intf->channels[0].lun,
4245 0, 1); /* Don't retry, and don't wait. */
4246 if (rv)
4247 atomic_sub(2, &panic_done_count);
4248 while (atomic_read(&panic_done_count) != 0)
4249 ipmi_poll(intf);
4250} 4182}
4251 4183
4252#ifdef CONFIG_IPMI_PANIC_STRING 4184#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4285,6 +4217,8 @@ static void send_panic_events(char *str)
4285 unsigned char data[16]; 4217 unsigned char data[16];
4286 struct ipmi_system_interface_addr *si; 4218 struct ipmi_system_interface_addr *si;
4287 struct ipmi_addr addr; 4219 struct ipmi_addr addr;
4220 struct ipmi_smi_msg smi_msg;
4221 struct ipmi_recv_msg recv_msg;
4288 4222
4289 si = (struct ipmi_system_interface_addr *) &addr; 4223 si = (struct ipmi_system_interface_addr *) &addr;
4290 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4224 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4312,6 +4246,9 @@ static void send_panic_events(char *str)
4312 data[7] = str[2]; 4246 data[7] = str[2];
4313 } 4247 }
4314 4248
4249 smi_msg.done = dummy_smi_done_handler;
4250 recv_msg.done = dummy_recv_done_handler;
4251
4315 /* For every registered interface, send the event. */ 4252 /* For every registered interface, send the event. */
4316 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4253 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4317 if (!intf->handlers) 4254 if (!intf->handlers)
@@ -4321,7 +4258,18 @@ static void send_panic_events(char *str)
4321 intf->run_to_completion = 1; 4258 intf->run_to_completion = 1;
4322 /* Send the event announcing the panic. */ 4259 /* Send the event announcing the panic. */
4323 intf->handlers->set_run_to_completion(intf->send_info, 1); 4260 intf->handlers->set_run_to_completion(intf->send_info, 1);
4324 ipmi_panic_request_and_wait(intf, &addr, &msg); 4261 i_ipmi_request(NULL,
4262 intf,
4263 &addr,
4264 0,
4265 &msg,
4266 intf,
4267 &smi_msg,
4268 &recv_msg,
4269 0,
4270 intf->channels[0].address,
4271 intf->channels[0].lun,
4272 0, 1); /* Don't retry, and don't wait. */
4325 } 4273 }
4326 4274
4327#ifdef CONFIG_IPMI_PANIC_STRING 4275#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4369,7 +4317,18 @@ static void send_panic_events(char *str)
4369 msg.data = NULL; 4317 msg.data = NULL;
4370 msg.data_len = 0; 4318 msg.data_len = 0;
4371 intf->null_user_handler = device_id_fetcher; 4319 intf->null_user_handler = device_id_fetcher;
4372 ipmi_panic_request_and_wait(intf, &addr, &msg); 4320 i_ipmi_request(NULL,
4321 intf,
4322 &addr,
4323 0,
4324 &msg,
4325 intf,
4326 &smi_msg,
4327 &recv_msg,
4328 0,
4329 intf->channels[0].address,
4330 intf->channels[0].lun,
4331 0, 1); /* Don't retry, and don't wait. */
4373 4332
4374 if (intf->local_event_generator) { 4333 if (intf->local_event_generator) {
4375 /* Request the event receiver from the local MC. */ 4334 /* Request the event receiver from the local MC. */
@@ -4378,7 +4337,18 @@ static void send_panic_events(char *str)
4378 msg.data = NULL; 4337 msg.data = NULL;
4379 msg.data_len = 0; 4338 msg.data_len = 0;
4380 intf->null_user_handler = event_receiver_fetcher; 4339 intf->null_user_handler = event_receiver_fetcher;
4381 ipmi_panic_request_and_wait(intf, &addr, &msg); 4340 i_ipmi_request(NULL,
4341 intf,
4342 &addr,
4343 0,
4344 &msg,
4345 intf,
4346 &smi_msg,
4347 &recv_msg,
4348 0,
4349 intf->channels[0].address,
4350 intf->channels[0].lun,
4351 0, 1); /* no retry, and no wait. */
4382 } 4352 }
4383 intf->null_user_handler = NULL; 4353 intf->null_user_handler = NULL;
4384 4354
@@ -4435,7 +4405,18 @@ static void send_panic_events(char *str)
4435 strncpy(data+5, p, 11); 4405 strncpy(data+5, p, 11);
4436 p += size; 4406 p += size;
4437 4407
4438 ipmi_panic_request_and_wait(intf, &addr, &msg); 4408 i_ipmi_request(NULL,
4409 intf,
4410 &addr,
4411 0,
4412 &msg,
4413 intf,
4414 &smi_msg,
4415 &recv_msg,
4416 0,
4417 intf->channels[0].address,
4418 intf->channels[0].lun,
4419 0, 1); /* no retry, and no wait. */
4439 } 4420 }
4440 } 4421 }
4441#endif /* CONFIG_IPMI_PANIC_STRING */ 4422#endif /* CONFIG_IPMI_PANIC_STRING */
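
The ipmi_msghandler hunks remove the receive tasklet: on the left, incoming SMI messages and watchdog pretimeouts are queued and a tasklet drains them outside the low-level handler; on the right, they are handled inline or deferred to the periodic timer. A sketch of the classic tasklet deferral pattern the left-hand column relies on; demo_* names are placeholders, not ipmi symbols:

#include <linux/interrupt.h>

static void demo_recv_work(unsigned long data)
{
        /* drain the queue of received messages here, outside the
         * interrupt-level path that noticed them */
}

static DECLARE_TASKLET(demo_tasklet, demo_recv_work, 0);

static void demo_msg_received(void)
{
        /* called from the low-level handler: defer the heavy lifting */
        tasklet_schedule(&demo_tasklet);
}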
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1c7fdcd22a9..9397ab49b72 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -41,6 +41,7 @@
41 41
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
44#include <asm/system.h>
44#include <linux/sched.h> 45#include <linux/sched.h>
45#include <linux/seq_file.h> 46#include <linux/seq_file.h>
46#include <linux/timer.h> 47#include <linux/timer.h>
@@ -155,7 +156,7 @@ enum si_stat_indexes {
155 /* Number of watchdog pretimeouts. */ 156 /* Number of watchdog pretimeouts. */
156 SI_STAT_watchdog_pretimeouts, 157 SI_STAT_watchdog_pretimeouts,
157 158
158 /* Number of asynchronous messages received. */ 159 /* Number of asynchronous messages received. */
159 SI_STAT_incoming_messages, 160 SI_STAT_incoming_messages,
160 161
161 162
@@ -170,6 +171,7 @@ struct smi_info {
170 struct si_sm_handlers *handlers; 171 struct si_sm_handlers *handlers;
171 enum si_type si_type; 172 enum si_type si_type;
172 spinlock_t si_lock; 173 spinlock_t si_lock;
174 spinlock_t msg_lock;
173 struct list_head xmit_msgs; 175 struct list_head xmit_msgs;
174 struct list_head hp_xmit_msgs; 176 struct list_head hp_xmit_msgs;
175 struct ipmi_smi_msg *curr_msg; 177 struct ipmi_smi_msg *curr_msg;
@@ -318,8 +320,16 @@ static int register_xaction_notifier(struct notifier_block *nb)
318static void deliver_recv_msg(struct smi_info *smi_info, 320static void deliver_recv_msg(struct smi_info *smi_info,
319 struct ipmi_smi_msg *msg) 321 struct ipmi_smi_msg *msg)
320{ 322{
321 /* Deliver the message to the upper layer. */ 323 /* Deliver the message to the upper layer with the lock
322 ipmi_smi_msg_received(smi_info->intf, msg); 324 released. */
325
326 if (smi_info->run_to_completion) {
327 ipmi_smi_msg_received(smi_info->intf, msg);
328 } else {
329 spin_unlock(&(smi_info->si_lock));
330 ipmi_smi_msg_received(smi_info->intf, msg);
331 spin_lock(&(smi_info->si_lock));
332 }
323} 333}
324 334
325static void return_hosed_msg(struct smi_info *smi_info, int cCode) 335static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -348,6 +358,13 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
348 struct timeval t; 358 struct timeval t;
349#endif 359#endif
350 360
361 /*
362 * No need to save flags, we already have interrupts off and we
363 * already hold the SMI lock.
364 */
365 if (!smi_info->run_to_completion)
366 spin_lock(&(smi_info->msg_lock));
367
351 /* Pick the high priority queue first. */ 368 /* Pick the high priority queue first. */
352 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 369 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
353 entry = smi_info->hp_xmit_msgs.next; 370 entry = smi_info->hp_xmit_msgs.next;
@@ -385,6 +402,9 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
385 rv = SI_SM_CALL_WITHOUT_DELAY; 402 rv = SI_SM_CALL_WITHOUT_DELAY;
386 } 403 }
387 out: 404 out:
405 if (!smi_info->run_to_completion)
406 spin_unlock(&(smi_info->msg_lock));
407
388 return rv; 408 return rv;
389} 409}
390 410
@@ -461,7 +481,9 @@ static void handle_flags(struct smi_info *smi_info)
461 481
462 start_clear_flags(smi_info); 482 start_clear_flags(smi_info);
463 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 483 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
484 spin_unlock(&(smi_info->si_lock));
464 ipmi_smi_watchdog_pretimeout(smi_info->intf); 485 ipmi_smi_watchdog_pretimeout(smi_info->intf);
486 spin_lock(&(smi_info->si_lock));
465 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 487 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
466 /* Messages available. */ 488 /* Messages available. */
467 smi_info->curr_msg = ipmi_alloc_smi_msg(); 489 smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -867,6 +889,19 @@ static void sender(void *send_info,
867 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 889 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
868#endif 890#endif
869 891
892 /*
893 * last_timeout_jiffies is updated here to avoid
894 * smi_timeout() handler passing very large time_diff
895 * value to smi_event_handler() that causes
896 * the send command to abort.
897 */
898 smi_info->last_timeout_jiffies = jiffies;
899
900 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
901
902 if (smi_info->thread)
903 wake_up_process(smi_info->thread);
904
870 if (smi_info->run_to_completion) { 905 if (smi_info->run_to_completion) {
871 /* 906 /*
872 * If we are running to completion, then throw it in 907 * If we are running to completion, then throw it in
@@ -889,29 +924,16 @@ static void sender(void *send_info,
889 return; 924 return;
890 } 925 }
891 926
892 spin_lock_irqsave(&smi_info->si_lock, flags); 927 spin_lock_irqsave(&smi_info->msg_lock, flags);
893 if (priority > 0) 928 if (priority > 0)
894 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); 929 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
895 else 930 else
896 list_add_tail(&msg->link, &smi_info->xmit_msgs); 931 list_add_tail(&msg->link, &smi_info->xmit_msgs);
932 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
897 933
898 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { 934 spin_lock_irqsave(&smi_info->si_lock, flags);
899 /* 935 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
900 * last_timeout_jiffies is updated here to avoid
901 * smi_timeout() handler passing very large time_diff
902 * value to smi_event_handler() that causes
903 * the send command to abort.
904 */
905 smi_info->last_timeout_jiffies = jiffies;
906
907 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
908
909 if (smi_info->thread)
910 wake_up_process(smi_info->thread);
911
912 start_next_msg(smi_info); 936 start_next_msg(smi_info);
913 smi_event_handler(smi_info, 0);
914 }
915 spin_unlock_irqrestore(&smi_info->si_lock, flags); 937 spin_unlock_irqrestore(&smi_info->si_lock, flags);
916} 938}
917 939
@@ -1012,19 +1034,16 @@ static int ipmi_thread(void *data)
1012static void poll(void *send_info) 1034static void poll(void *send_info)
1013{ 1035{
1014 struct smi_info *smi_info = send_info; 1036 struct smi_info *smi_info = send_info;
1015 unsigned long flags = 0; 1037 unsigned long flags;
1016 int run_to_completion = smi_info->run_to_completion;
1017 1038
1018 /* 1039 /*
1019 * Make sure there is some delay in the poll loop so we can 1040 * Make sure there is some delay in the poll loop so we can
1020 * drive time forward and timeout things. 1041 * drive time forward and timeout things.
1021 */ 1042 */
1022 udelay(10); 1043 udelay(10);
1023 if (!run_to_completion) 1044 spin_lock_irqsave(&smi_info->si_lock, flags);
1024 spin_lock_irqsave(&smi_info->si_lock, flags);
1025 smi_event_handler(smi_info, 10); 1045 smi_event_handler(smi_info, 10);
1026 if (!run_to_completion) 1046 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1027 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1028} 1047}
1029 1048
1030static void request_events(void *send_info) 1049static void request_events(void *send_info)
@@ -1208,7 +1227,7 @@ static int smi_num; /* Used to sequence the SMIs */
1208#define DEFAULT_REGSPACING 1 1227#define DEFAULT_REGSPACING 1
1209#define DEFAULT_REGSIZE 1 1228#define DEFAULT_REGSIZE 1
1210 1229
1211static bool si_trydefaults = 1; 1230static int si_trydefaults = 1;
1212static char *si_type[SI_MAX_PARMS]; 1231static char *si_type[SI_MAX_PARMS];
1213#define MAX_SI_TYPE_STR 30 1232#define MAX_SI_TYPE_STR 30
1214static char si_type_str[MAX_SI_TYPE_STR]; 1233static char si_type_str[MAX_SI_TYPE_STR];
@@ -1661,8 +1680,10 @@ static struct smi_info *smi_info_alloc(void)
1661{ 1680{
1662 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); 1681 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1663 1682
1664 if (info) 1683 if (info) {
1665 spin_lock_init(&info->si_lock); 1684 spin_lock_init(&info->si_lock);
1685 spin_lock_init(&info->msg_lock);
1686 }
1666 return info; 1687 return info;
1667} 1688}
1668 1689
@@ -1836,7 +1857,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1836 return rv; 1857 return rv;
1837} 1858}
1838 1859
1839static int hardcode_find_bmc(void) 1860static int __devinit hardcode_find_bmc(void)
1840{ 1861{
1841 int ret = -ENODEV; 1862 int ret = -ENODEV;
1842 int i; 1863 int i;
@@ -2023,7 +2044,7 @@ struct SPMITable {
2023 s8 spmi_id[1]; /* A '\0' terminated array starts here. */ 2044 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
2024}; 2045};
2025 2046
2026static int try_init_spmi(struct SPMITable *spmi) 2047static int __devinit try_init_spmi(struct SPMITable *spmi)
2027{ 2048{
2028 struct smi_info *info; 2049 struct smi_info *info;
2029 2050
@@ -2106,7 +2127,7 @@ static int try_init_spmi(struct SPMITable *spmi)
2106 return 0; 2127 return 0;
2107} 2128}
2108 2129
2109static void spmi_find_bmc(void) 2130static void __devinit spmi_find_bmc(void)
2110{ 2131{
2111 acpi_status status; 2132 acpi_status status;
2112 struct SPMITable *spmi; 2133 struct SPMITable *spmi;
@@ -2128,7 +2149,7 @@ static void spmi_find_bmc(void)
2128 } 2149 }
2129} 2150}
2130 2151
2131static int ipmi_pnp_probe(struct pnp_dev *dev, 2152static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2132 const struct pnp_device_id *dev_id) 2153 const struct pnp_device_id *dev_id)
2133{ 2154{
2134 struct acpi_device *acpi_dev; 2155 struct acpi_device *acpi_dev;
@@ -2228,7 +2249,7 @@ err_free:
2228 return -EINVAL; 2249 return -EINVAL;
2229} 2250}
2230 2251
2231static void ipmi_pnp_remove(struct pnp_dev *dev) 2252static void __devexit ipmi_pnp_remove(struct pnp_dev *dev)
2232{ 2253{
2233 struct smi_info *info = pnp_get_drvdata(dev); 2254 struct smi_info *info = pnp_get_drvdata(dev);
2234 2255
@@ -2243,7 +2264,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
2243static struct pnp_driver ipmi_pnp_driver = { 2264static struct pnp_driver ipmi_pnp_driver = {
2244 .name = DEVICE_NAME, 2265 .name = DEVICE_NAME,
2245 .probe = ipmi_pnp_probe, 2266 .probe = ipmi_pnp_probe,
2246 .remove = ipmi_pnp_remove, 2267 .remove = __devexit_p(ipmi_pnp_remove),
2247 .id_table = pnp_dev_table, 2268 .id_table = pnp_dev_table,
2248}; 2269};
2249#endif 2270#endif
@@ -2258,7 +2279,7 @@ struct dmi_ipmi_data {
2258 u8 slave_addr; 2279 u8 slave_addr;
2259}; 2280};
2260 2281
2261static int decode_dmi(const struct dmi_header *dm, 2282static int __devinit decode_dmi(const struct dmi_header *dm,
2262 struct dmi_ipmi_data *dmi) 2283 struct dmi_ipmi_data *dmi)
2263{ 2284{
2264 const u8 *data = (const u8 *)dm; 2285 const u8 *data = (const u8 *)dm;
@@ -2320,7 +2341,7 @@ static int decode_dmi(const struct dmi_header *dm,
2320 return 0; 2341 return 0;
2321} 2342}
2322 2343
2323static void try_init_dmi(struct dmi_ipmi_data *ipmi_data) 2344static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2324{ 2345{
2325 struct smi_info *info; 2346 struct smi_info *info;
2326 2347
@@ -2388,7 +2409,7 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2388 kfree(info); 2409 kfree(info);
2389} 2410}
2390 2411
2391static void dmi_find_bmc(void) 2412static void __devinit dmi_find_bmc(void)
2392{ 2413{
2393 const struct dmi_device *dev = NULL; 2414 const struct dmi_device *dev = NULL;
2394 struct dmi_ipmi_data data; 2415 struct dmi_ipmi_data data;
@@ -2424,39 +2445,7 @@ static void ipmi_pci_cleanup(struct smi_info *info)
2424 pci_disable_device(pdev); 2445 pci_disable_device(pdev);
2425} 2446}
2426 2447
2427static int ipmi_pci_probe_regspacing(struct smi_info *info) 2448static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2428{
2429 if (info->si_type == SI_KCS) {
2430 unsigned char status;
2431 int regspacing;
2432
2433 info->io.regsize = DEFAULT_REGSIZE;
2434 info->io.regshift = 0;
2435 info->io_size = 2;
2436 info->handlers = &kcs_smi_handlers;
2437
2438 /* detect 1, 4, 16byte spacing */
2439 for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
2440 info->io.regspacing = regspacing;
2441 if (info->io_setup(info)) {
2442 dev_err(info->dev,
2443 "Could not setup I/O space\n");
2444 return DEFAULT_REGSPACING;
2445 }
2446 /* write invalid cmd */
2447 info->io.outputb(&info->io, 1, 0x10);
2448 /* read status back */
2449 status = info->io.inputb(&info->io, 1);
2450 info->io_cleanup(info);
2451 if (status)
2452 return regspacing;
2453 regspacing *= 4;
2454 }
2455 }
2456 return DEFAULT_REGSPACING;
2457}
2458
2459static int ipmi_pci_probe(struct pci_dev *pdev,
2460 const struct pci_device_id *ent) 2449 const struct pci_device_id *ent)
2461{ 2450{
2462 int rv; 2451 int rv;
@@ -2508,8 +2497,8 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
2508 } 2497 }
2509 info->io.addr_data = pci_resource_start(pdev, 0); 2498 info->io.addr_data = pci_resource_start(pdev, 0);
2510 2499
2511 info->io.regspacing = ipmi_pci_probe_regspacing(info); 2500 info->io.regspacing = DEFAULT_REGSPACING;
2512 info->io.regsize = DEFAULT_REGSIZE; 2501 info->io.regsize = DEFAULT_REGSPACING;
2513 info->io.regshift = 0; 2502 info->io.regshift = 0;
2514 2503
2515 info->irq = pdev->irq; 2504 info->irq = pdev->irq;
@@ -2529,12 +2518,24 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
2529 return 0; 2518 return 0;
2530} 2519}
2531 2520
2532static void ipmi_pci_remove(struct pci_dev *pdev) 2521static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2533{ 2522{
2534 struct smi_info *info = pci_get_drvdata(pdev); 2523 struct smi_info *info = pci_get_drvdata(pdev);
2535 cleanup_one_si(info); 2524 cleanup_one_si(info);
2536} 2525}
2537 2526
2527#ifdef CONFIG_PM
2528static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2529{
2530 return 0;
2531}
2532
2533static int ipmi_pci_resume(struct pci_dev *pdev)
2534{
2535 return 0;
2536}
2537#endif
2538
2538static struct pci_device_id ipmi_pci_devices[] = { 2539static struct pci_device_id ipmi_pci_devices[] = {
2539 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, 2540 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2540 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, 2541 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
@@ -2546,12 +2547,16 @@ static struct pci_driver ipmi_pci_driver = {
2546 .name = DEVICE_NAME, 2547 .name = DEVICE_NAME,
2547 .id_table = ipmi_pci_devices, 2548 .id_table = ipmi_pci_devices,
2548 .probe = ipmi_pci_probe, 2549 .probe = ipmi_pci_probe,
2549 .remove = ipmi_pci_remove, 2550 .remove = __devexit_p(ipmi_pci_remove),
2551#ifdef CONFIG_PM
2552 .suspend = ipmi_pci_suspend,
2553 .resume = ipmi_pci_resume,
2554#endif
2550}; 2555};
2551#endif /* CONFIG_PCI */ 2556#endif /* CONFIG_PCI */
2552 2557
2553static struct of_device_id ipmi_match[]; 2558static struct of_device_id ipmi_match[];
2554static int ipmi_probe(struct platform_device *dev) 2559static int __devinit ipmi_probe(struct platform_device *dev)
2555{ 2560{
2556#ifdef CONFIG_OF 2561#ifdef CONFIG_OF
2557 const struct of_device_id *match; 2562 const struct of_device_id *match;
@@ -2635,7 +2640,7 @@ static int ipmi_probe(struct platform_device *dev)
2635 return 0; 2640 return 0;
2636} 2641}
2637 2642
2638static int ipmi_remove(struct platform_device *dev) 2643static int __devexit ipmi_remove(struct platform_device *dev)
2639{ 2644{
2640#ifdef CONFIG_OF 2645#ifdef CONFIG_OF
2641 cleanup_one_si(dev_get_drvdata(&dev->dev)); 2646 cleanup_one_si(dev_get_drvdata(&dev->dev));
@@ -2661,7 +2666,7 @@ static struct platform_driver ipmi_driver = {
2661 .of_match_table = ipmi_match, 2666 .of_match_table = ipmi_match,
2662 }, 2667 },
2663 .probe = ipmi_probe, 2668 .probe = ipmi_probe,
2664 .remove = ipmi_remove, 2669 .remove = __devexit_p(ipmi_remove),
2665}; 2670};
2666 2671
2667static int wait_for_msg_done(struct smi_info *smi_info) 2672static int wait_for_msg_done(struct smi_info *smi_info)
@@ -3047,7 +3052,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
3047 } 3052 }
3048} 3053}
3049 3054
3050static struct ipmi_default_vals 3055static __devinitdata struct ipmi_default_vals
3051{ 3056{
3052 int type; 3057 int type;
3053 int port; 3058 int port;
@@ -3059,7 +3064,7 @@ static struct ipmi_default_vals
3059 { .port = 0 } 3064 { .port = 0 }
3060}; 3065};
3061 3066
3062static void default_find_bmc(void) 3067static void __devinit default_find_bmc(void)
3063{ 3068{
3064 struct smi_info *info; 3069 struct smi_info *info;
3065 int i; 3070 int i;
@@ -3359,7 +3364,7 @@ static int try_smi_init(struct smi_info *new_smi)
3359 return rv; 3364 return rv;
3360} 3365}
3361 3366
3362static int init_ipmi_si(void) 3367static int __devinit init_ipmi_si(void)
3363{ 3368{
3364 int i; 3369 int i;
3365 char *str; 3370 char *str;
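
Note: the ipmi_si hunks above reinstate the pre-3.8 hotplug section annotations — __devinit/__devinitdata on the discovery and probe paths, __devexit on the remove paths — and wrap remove callbacks in __devexit_p(). A minimal, hypothetical sketch of that convention follows (driver and function names are placeholders, not taken from the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/*
 * Illustrative only: probe/remove may be discarded when hotplug support
 * is compiled out, and __devexit_p() degrades to NULL in that case.
 */
static int __devinit example_probe(struct platform_device *pdev)
{
	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name  = "example",
		.owner = THIS_MODULE,
	},
	.probe  = example_probe,
	.remove = __devexit_p(example_remove),
};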
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 37b8be7cba9..3302586655c 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -65,7 +65,6 @@
65 * mechanism for it at that time. 65 * mechanism for it at that time.
66 */ 66 */
67#include <asm/kdebug.h> 67#include <asm/kdebug.h>
68#include <asm/nmi.h>
69#define HAVE_DIE_NMI 68#define HAVE_DIE_NMI
70#endif 69#endif
71 70
@@ -139,10 +138,19 @@
139#define IPMI_WDOG_SET_TIMER 0x24 138#define IPMI_WDOG_SET_TIMER 0x24
140#define IPMI_WDOG_GET_TIMER 0x25 139#define IPMI_WDOG_GET_TIMER 0x25
141 140
142#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80 141/* These are here until the real ones get into the watchdog.h interface. */
142#ifndef WDIOC_GETTIMEOUT
143#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
144#endif
145#ifndef WDIOC_SET_PRETIMEOUT
146#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
147#endif
148#ifndef WDIOC_GET_PRETIMEOUT
149#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
150#endif
143 151
144static DEFINE_MUTEX(ipmi_watchdog_mutex); 152static DEFINE_MUTEX(ipmi_watchdog_mutex);
145static bool nowayout = WATCHDOG_NOWAYOUT; 153static int nowayout = WATCHDOG_NOWAYOUT;
146 154
147static ipmi_user_t watchdog_user; 155static ipmi_user_t watchdog_user;
148static int watchdog_ifnum; 156static int watchdog_ifnum;
@@ -309,7 +317,7 @@ module_param(start_now, int, 0444);
309MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" 317MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
310 "soon as the driver is loaded."); 318 "soon as the driver is loaded.");
311 319
312module_param(nowayout, bool, 0644); 320module_param(nowayout, int, 0644);
313MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " 321MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
314 "(default=CONFIG_WATCHDOG_NOWAYOUT)"); 322 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
315 323
@@ -509,7 +517,6 @@ static void panic_halt_ipmi_heartbeat(void)
509 msg.cmd = IPMI_WDOG_RESET_TIMER; 517 msg.cmd = IPMI_WDOG_RESET_TIMER;
510 msg.data = NULL; 518 msg.data = NULL;
511 msg.data_len = 0; 519 msg.data_len = 0;
512 atomic_add(2, &panic_done_count);
513 rv = ipmi_request_supply_msgs(watchdog_user, 520 rv = ipmi_request_supply_msgs(watchdog_user,
514 (struct ipmi_addr *) &addr, 521 (struct ipmi_addr *) &addr,
515 0, 522 0,
@@ -518,8 +525,8 @@ static void panic_halt_ipmi_heartbeat(void)
518 &panic_halt_heartbeat_smi_msg, 525 &panic_halt_heartbeat_smi_msg,
519 &panic_halt_heartbeat_recv_msg, 526 &panic_halt_heartbeat_recv_msg,
520 1); 527 1);
521 if (rv) 528 if (!rv)
522 atomic_sub(2, &panic_done_count); 529 atomic_add(2, &panic_done_count);
523} 530}
524 531
525static struct ipmi_smi_msg panic_halt_smi_msg = { 532static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -543,18 +550,16 @@ static void panic_halt_ipmi_set_timeout(void)
543 /* Wait for the messages to be free. */ 550 /* Wait for the messages to be free. */
544 while (atomic_read(&panic_done_count) != 0) 551 while (atomic_read(&panic_done_count) != 0)
545 ipmi_poll_interface(watchdog_user); 552 ipmi_poll_interface(watchdog_user);
546 atomic_add(2, &panic_done_count);
547 rv = i_ipmi_set_timeout(&panic_halt_smi_msg, 553 rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
548 &panic_halt_recv_msg, 554 &panic_halt_recv_msg,
549 &send_heartbeat_now); 555 &send_heartbeat_now);
550 if (rv) { 556 if (!rv) {
551 atomic_sub(2, &panic_done_count); 557 atomic_add(2, &panic_done_count);
552 printk(KERN_WARNING PFX
553 "Unable to extend the watchdog timeout.");
554 } else {
555 if (send_heartbeat_now) 558 if (send_heartbeat_now)
556 panic_halt_ipmi_heartbeat(); 559 panic_halt_ipmi_heartbeat();
557 } 560 } else
561 printk(KERN_WARNING PFX
562 "Unable to extend the watchdog timeout.");
558 while (atomic_read(&panic_done_count) != 0) 563 while (atomic_read(&panic_done_count) != 0)
559 ipmi_poll_interface(watchdog_user); 564 ipmi_poll_interface(watchdog_user);
560} 565}
@@ -590,7 +595,6 @@ static int ipmi_heartbeat(void)
590 struct kernel_ipmi_msg msg; 595 struct kernel_ipmi_msg msg;
591 int rv; 596 int rv;
592 struct ipmi_system_interface_addr addr; 597 struct ipmi_system_interface_addr addr;
593 int timeout_retries = 0;
594 598
595 if (ipmi_ignore_heartbeat) 599 if (ipmi_ignore_heartbeat)
596 return 0; 600 return 0;
@@ -611,7 +615,6 @@ static int ipmi_heartbeat(void)
611 615
612 mutex_lock(&heartbeat_lock); 616 mutex_lock(&heartbeat_lock);
613 617
614restart:
615 atomic_set(&heartbeat_tofree, 2); 618 atomic_set(&heartbeat_tofree, 2);
616 619
617 /* 620 /*
@@ -649,33 +652,7 @@ restart:
649 /* Wait for the heartbeat to be sent. */ 652 /* Wait for the heartbeat to be sent. */
650 wait_for_completion(&heartbeat_wait); 653 wait_for_completion(&heartbeat_wait);
651 654
652 if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) { 655 if (heartbeat_recv_msg.msg.data[0] != 0) {
653 timeout_retries++;
654 if (timeout_retries > 3) {
655 printk(KERN_ERR PFX ": Unable to restore the IPMI"
656 " watchdog's settings, giving up.\n");
657 rv = -EIO;
658 goto out_unlock;
659 }
660
661 /*
662 * The timer was not initialized, that means the BMC was
663 * probably reset and lost the watchdog information. Attempt
664 * to restore the timer's info. Note that we still hold
665 * the heartbeat lock, to keep a heartbeat from happening
666 * in this process, so must say no heartbeat to avoid a
667 * deadlock on this mutex.
668 */
669 rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
670 if (rv) {
671 printk(KERN_ERR PFX ": Unable to send the command to"
672 " set the watchdog's settings, giving up.\n");
673 goto out_unlock;
674 }
675
676 /* We might need a new heartbeat, so do it now */
677 goto restart;
678 } else if (heartbeat_recv_msg.msg.data[0] != 0) {
679 /* 656 /*
680 * Got an error in the heartbeat response. It was already 657 * Got an error in the heartbeat response. It was already
681 * reported in ipmi_wdog_msg_handler, but we should return 658 * reported in ipmi_wdog_msg_handler, but we should return
@@ -684,7 +661,6 @@ restart:
684 rv = -EINVAL; 661 rv = -EINVAL;
685 } 662 }
686 663
687out_unlock:
688 mutex_unlock(&heartbeat_lock); 664 mutex_unlock(&heartbeat_lock);
689 665
690 return rv; 666 return rv;
@@ -721,6 +697,7 @@ static int ipmi_ioctl(struct file *file,
721 return -EFAULT; 697 return -EFAULT;
722 return 0; 698 return 0;
723 699
700 case WDIOC_SET_PRETIMEOUT:
724 case WDIOC_SETPRETIMEOUT: 701 case WDIOC_SETPRETIMEOUT:
725 i = copy_from_user(&val, argp, sizeof(int)); 702 i = copy_from_user(&val, argp, sizeof(int));
726 if (i) 703 if (i)
@@ -728,6 +705,7 @@ static int ipmi_ioctl(struct file *file,
728 pretimeout = val; 705 pretimeout = val;
729 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); 706 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
730 707
708 case WDIOC_GET_PRETIMEOUT:
731 case WDIOC_GETPRETIMEOUT: 709 case WDIOC_GETPRETIMEOUT:
732 i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); 710 i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
733 if (i) 711 if (i)
@@ -943,15 +921,11 @@ static struct miscdevice ipmi_wdog_miscdev = {
943static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, 921static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
944 void *handler_data) 922 void *handler_data)
945{ 923{
946 if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER && 924 if (msg->msg.data[0] != 0) {
947 msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
948 printk(KERN_INFO PFX "response: The IPMI controller appears"
949 " to have been reset, will attempt to reinitialize"
950 " the watchdog timer\n");
951 else if (msg->msg.data[0] != 0)
952 printk(KERN_ERR PFX "response: Error %x on cmd %x\n", 925 printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
953 msg->msg.data[0], 926 msg->msg.data[0],
954 msg->msg.cmd); 927 msg->msg.cmd);
928 }
955 929
956 ipmi_free_recv_msg(msg); 930 ipmi_free_recv_msg(msg);
957} 931}
@@ -1103,8 +1077,17 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
1103 1077
1104#ifdef HAVE_DIE_NMI 1078#ifdef HAVE_DIE_NMI
1105static int 1079static int
1106ipmi_nmi(unsigned int val, struct pt_regs *regs) 1080ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
1107{ 1081{
1082 struct die_args *args = data;
1083
1084 if (val != DIE_NMIUNKNOWN)
1085 return NOTIFY_OK;
1086
1087 /* Hack, if it's a memory or I/O error, ignore it. */
1088 if (args->err & 0xc0)
1089 return NOTIFY_OK;
1090
1108 /* 1091 /*
1109 * If we get here, it's an NMI that's not a memory or I/O 1092 * If we get here, it's an NMI that's not a memory or I/O
1110 * error. We can't truly tell if it's from IPMI or not 1093 * error. We can't truly tell if it's from IPMI or not
@@ -1114,15 +1097,15 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs)
1114 1097
1115 if (testing_nmi) { 1098 if (testing_nmi) {
1116 testing_nmi = 2; 1099 testing_nmi = 2;
1117 return NMI_HANDLED; 1100 return NOTIFY_STOP;
1118 } 1101 }
1119 1102
1120 /* If we are not expecting a timeout, ignore it. */ 1103 /* If we are not expecting a timeout, ignore it. */
1121 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) 1104 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
1122 return NMI_DONE; 1105 return NOTIFY_OK;
1123 1106
1124 if (preaction_val != WDOG_PRETIMEOUT_NMI) 1107 if (preaction_val != WDOG_PRETIMEOUT_NMI)
1125 return NMI_DONE; 1108 return NOTIFY_OK;
1126 1109
1127 /* 1110 /*
1128 * If no one else handled the NMI, we assume it was the IPMI 1111 * If no one else handled the NMI, we assume it was the IPMI
@@ -1137,8 +1120,12 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs)
1137 panic(PFX "pre-timeout"); 1120 panic(PFX "pre-timeout");
1138 } 1121 }
1139 1122
1140 return NMI_HANDLED; 1123 return NOTIFY_STOP;
1141} 1124}
1125
1126static struct notifier_block ipmi_nmi_handler = {
1127 .notifier_call = ipmi_nmi
1128};
1142#endif 1129#endif
1143 1130
1144static int wdog_reboot_handler(struct notifier_block *this, 1131static int wdog_reboot_handler(struct notifier_block *this,
@@ -1154,7 +1141,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1154 if (code == SYS_POWER_OFF || code == SYS_HALT) { 1141 if (code == SYS_POWER_OFF || code == SYS_HALT) {
1155 /* Disable the WDT if we are shutting down. */ 1142 /* Disable the WDT if we are shutting down. */
1156 ipmi_watchdog_state = WDOG_TIMEOUT_NONE; 1143 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
1157 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); 1144 panic_halt_ipmi_set_timeout();
1158 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { 1145 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
1159 /* Set a long timer to let the reboot happens, but 1146 /* Set a long timer to let the reboot happens, but
1160 reboot if it hangs, but only if the watchdog 1147 reboot if it hangs, but only if the watchdog
@@ -1162,7 +1149,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1162 timeout = 120; 1149 timeout = 120;
1163 pretimeout = 0; 1150 pretimeout = 0;
1164 ipmi_watchdog_state = WDOG_TIMEOUT_RESET; 1151 ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
1165 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); 1152 panic_halt_ipmi_set_timeout();
1166 } 1153 }
1167 } 1154 }
1168 return NOTIFY_OK; 1155 return NOTIFY_OK;
@@ -1303,8 +1290,7 @@ static void check_parms(void)
1303 } 1290 }
1304 } 1291 }
1305 if (do_nmi && !nmi_handler_registered) { 1292 if (do_nmi && !nmi_handler_registered) {
1306 rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, 1293 rv = register_die_notifier(&ipmi_nmi_handler);
1307 "ipmi");
1308 if (rv) { 1294 if (rv) {
1309 printk(KERN_WARNING PFX 1295 printk(KERN_WARNING PFX
1310 "Can't register nmi handler\n"); 1296 "Can't register nmi handler\n");
@@ -1312,7 +1298,7 @@ static void check_parms(void)
1312 } else 1298 } else
1313 nmi_handler_registered = 1; 1299 nmi_handler_registered = 1;
1314 } else if (!do_nmi && nmi_handler_registered) { 1300 } else if (!do_nmi && nmi_handler_registered) {
1315 unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); 1301 unregister_die_notifier(&ipmi_nmi_handler);
1316 nmi_handler_registered = 0; 1302 nmi_handler_registered = 0;
1317 } 1303 }
1318#endif 1304#endif
@@ -1350,7 +1336,7 @@ static int __init ipmi_wdog_init(void)
1350 if (rv) { 1336 if (rv) {
1351#ifdef HAVE_DIE_NMI 1337#ifdef HAVE_DIE_NMI
1352 if (nmi_handler_registered) 1338 if (nmi_handler_registered)
1353 unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); 1339 unregister_die_notifier(&ipmi_nmi_handler);
1354#endif 1340#endif
1355 atomic_notifier_chain_unregister(&panic_notifier_list, 1341 atomic_notifier_chain_unregister(&panic_notifier_list,
1356 &wdog_panic_notifier); 1342 &wdog_panic_notifier);
@@ -1371,7 +1357,7 @@ static void __exit ipmi_wdog_exit(void)
1371 1357
1372#ifdef HAVE_DIE_NMI 1358#ifdef HAVE_DIE_NMI
1373 if (nmi_handler_registered) 1359 if (nmi_handler_registered)
1374 unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); 1360 unregister_die_notifier(&ipmi_nmi_handler);
1375#endif 1361#endif
1376 1362
1377 atomic_notifier_chain_unregister(&panic_notifier_list, 1363 atomic_notifier_chain_unregister(&panic_notifier_list,
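
Note: the ipmi_watchdog hunks above move the NMI pre-timeout hook from register_nmi_handler() back to the die-notifier chain (register_die_notifier() with a struct notifier_block, returning NOTIFY_STOP to claim an unknown NMI). A minimal sketch of that pattern, mirroring the hunk but with placeholder names (x86-style DIE_NMIUNKNOWN assumed):

#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_nmi(struct notifier_block *self, unsigned long val,
		       void *data)
{
	struct die_args *args = data;

	if (val != DIE_NMIUNKNOWN)
		return NOTIFY_OK;		/* not an unclaimed NMI */
	if (args->err & 0xc0)
		return NOTIFY_OK;		/* memory or I/O error: ignore */

	/* ... handle the pre-timeout here ... */
	return NOTIFY_STOP;			/* claim the NMI */
}

static struct notifier_block example_nmi_nb = {
	.notifier_call = example_nmi,
};

static int __init example_init(void)
{
	return register_die_notifier(&example_nmi_nb);
}

static void __exit example_exit(void)
{
	unregister_die_notifier(&example_nmi_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");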
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index a741e418b45..97c3edb95ae 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -135,6 +135,7 @@
135 135
136#include <asm/irq.h> 136#include <asm/irq.h>
137#include <asm/uaccess.h> 137#include <asm/uaccess.h>
138#include <asm/system.h>
138 139
139/* if you have more than 8 printers, remember to increase LP_NO */ 140/* if you have more than 8 printers, remember to increase LP_NO */
140#define LP_NO 8 141#define LP_NO 8
@@ -705,13 +706,16 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
705{ 706{
706 unsigned int minor; 707 unsigned int minor;
707 struct timeval par_timeout; 708 struct timeval par_timeout;
709 struct compat_timeval __user *tc;
708 int ret; 710 int ret;
709 711
710 minor = iminor(file->f_path.dentry->d_inode); 712 minor = iminor(file->f_path.dentry->d_inode);
711 mutex_lock(&lp_mutex); 713 mutex_lock(&lp_mutex);
712 switch (cmd) { 714 switch (cmd) {
713 case LPSETTIMEOUT: 715 case LPSETTIMEOUT:
714 if (compat_get_timeval(&par_timeout, compat_ptr(arg))) { 716 tc = compat_ptr(arg);
717 if (get_user(par_timeout.tv_sec, &tc->tv_sec) ||
718 get_user(par_timeout.tv_usec, &tc->tv_usec)) {
715 ret = -EFAULT; 719 ret = -EFAULT;
716 break; 720 break;
717 } 721 }
@@ -825,7 +829,7 @@ static struct console lpcons = {
825 829
826static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC }; 830static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
827static char *parport[LP_NO]; 831static char *parport[LP_NO];
828static bool reset; 832static int reset;
829 833
830module_param_array(parport, charp, NULL, 0); 834module_param_array(parport, charp, NULL, 0);
831module_param(reset, bool, 0); 835module_param(reset, bool, 0);
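
Note: the lp.c hunk above replaces compat_get_timeval() with open-coded get_user() reads of a 32-bit struct compat_timeval. A hedged fragment of that pattern (function name is hypothetical; assumes CONFIG_COMPAT):

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/uaccess.h>

static int example_get_compat_timeval(struct timeval *out, unsigned long arg)
{
	struct compat_timeval __user *tc = compat_ptr(arg);

	/* copy the 32-bit fields one by one from user space */
	if (get_user(out->tv_sec, &tc->tv_sec) ||
	    get_user(out->tv_usec, &tc->tv_usec))
		return -EFAULT;
	return 0;
}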
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index e5d3e3f7a49..1aeaaba680d 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <asm/io.h> 29#include <asm/io.h>
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/system.h>
31#include <asm/pgtable.h> 32#include <asm/pgtable.h>
32#include <asm/sn/addrs.h> 33#include <asm/sn/addrs.h>
33#include <asm/sn/intr.h> 34#include <asm/sn/intr.h>
@@ -507,7 +508,7 @@ static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
507 508
508 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 509 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
509 510
510 /* Remap-pfn-range will mark the range VM_IO */ 511 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
511 if (remap_pfn_range(vma, 512 if (remap_pfn_range(vma,
512 vma->vm_start, 513 vma->vm_start,
513 __pa(soft->gscr_addr) >> PAGE_SHIFT, 514 __pa(soft->gscr_addr) >> PAGE_SHIFT,
@@ -799,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
799 return 0; 800 return 0;
800} 801}
801 802
802static const struct cx_device_id mbcs_id_table[] = { 803static const struct cx_device_id __devinitdata mbcs_id_table[] = {
803 { 804 {
804 .part_num = MBCS_PART_NUM, 805 .part_num = MBCS_PART_NUM,
805 .mfg_num = MBCS_MFG_NUM, 806 .mfg_num = MBCS_MFG_NUM,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc2baa..9b1eb188acd 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -26,17 +26,14 @@
26#include <linux/bootmem.h> 26#include <linux/bootmem.h>
27#include <linux/splice.h> 27#include <linux/splice.h>
28#include <linux/pfn.h> 28#include <linux/pfn.h>
29#include <linux/export.h>
30#include <linux/io.h>
31 29
32#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/io.h>
33 32
34#ifdef CONFIG_IA64 33#ifdef CONFIG_IA64
35# include <linux/efi.h> 34# include <linux/efi.h>
36#endif 35#endif
37 36
38#define DEVPORT_MINOR 4
39
40static inline unsigned long size_inside_page(unsigned long start, 37static inline unsigned long size_inside_page(unsigned long start,
41 unsigned long size) 38 unsigned long size)
42{ 39{
@@ -48,7 +45,7 @@ static inline unsigned long size_inside_page(unsigned long start,
48} 45}
49 46
50#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE 47#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
51static inline int valid_phys_addr_range(phys_addr_t addr, size_t count) 48static inline int valid_phys_addr_range(unsigned long addr, size_t count)
52{ 49{
53 return addr + count <= __pa(high_memory); 50 return addr + count <= __pa(high_memory);
54} 51}
@@ -59,6 +56,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
59} 56}
60#endif 57#endif
61 58
59#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
62#ifdef CONFIG_STRICT_DEVMEM 60#ifdef CONFIG_STRICT_DEVMEM
63static inline int range_is_allowed(unsigned long pfn, unsigned long size) 61static inline int range_is_allowed(unsigned long pfn, unsigned long size)
64{ 62{
@@ -84,7 +82,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
84 return 1; 82 return 1;
85} 83}
86#endif 84#endif
85#endif
87 86
87#ifdef CONFIG_DEVMEM
88void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) 88void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
89{ 89{
90} 90}
@@ -96,7 +96,7 @@ void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
96static ssize_t read_mem(struct file *file, char __user *buf, 96static ssize_t read_mem(struct file *file, char __user *buf,
97 size_t count, loff_t *ppos) 97 size_t count, loff_t *ppos)
98{ 98{
99 phys_addr_t p = *ppos; 99 unsigned long p = *ppos;
100 ssize_t read, sz; 100 ssize_t read, sz;
101 char *ptr; 101 char *ptr;
102 102
@@ -153,7 +153,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
153static ssize_t write_mem(struct file *file, const char __user *buf, 153static ssize_t write_mem(struct file *file, const char __user *buf,
154 size_t count, loff_t *ppos) 154 size_t count, loff_t *ppos)
155{ 155{
156 phys_addr_t p = *ppos; 156 unsigned long p = *ppos;
157 ssize_t written, sz; 157 ssize_t written, sz;
158 unsigned long copied; 158 unsigned long copied;
159 void *ptr; 159 void *ptr;
@@ -211,6 +211,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
211 *ppos += written; 211 *ppos += written;
212 return written; 212 return written;
213} 213}
214#endif /* CONFIG_DEVMEM */
215
216#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
214 217
215int __weak phys_mem_access_prot_allowed(struct file *file, 218int __weak phys_mem_access_prot_allowed(struct file *file,
216 unsigned long pfn, unsigned long size, pgprot_t *vma_prot) 219 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -226,7 +229,7 @@ int __weak phys_mem_access_prot_allowed(struct file *file,
226 * 229 *
227 */ 230 */
228#ifdef pgprot_noncached 231#ifdef pgprot_noncached
229static int uncached_access(struct file *file, phys_addr_t addr) 232static int uncached_access(struct file *file, unsigned long addr)
230{ 233{
231#if defined(CONFIG_IA64) 234#if defined(CONFIG_IA64)
232 /* 235 /*
@@ -258,7 +261,7 @@ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
258 unsigned long size, pgprot_t vma_prot) 261 unsigned long size, pgprot_t vma_prot)
259{ 262{
260#ifdef pgprot_noncached 263#ifdef pgprot_noncached
261 phys_addr_t offset = pfn << PAGE_SHIFT; 264 unsigned long offset = pfn << PAGE_SHIFT;
262 265
263 if (uncached_access(file, offset)) 266 if (uncached_access(file, offset))
264 return pgprot_noncached(vma_prot); 267 return pgprot_noncached(vma_prot);
@@ -322,7 +325,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
322 325
323 vma->vm_ops = &mmap_mem_ops; 326 vma->vm_ops = &mmap_mem_ops;
324 327
325 /* Remap-pfn-range will mark the range VM_IO */ 328 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
326 if (remap_pfn_range(vma, 329 if (remap_pfn_range(vma,
327 vma->vm_start, 330 vma->vm_start,
328 vma->vm_pgoff, 331 vma->vm_pgoff,
@@ -332,6 +335,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
332 } 335 }
333 return 0; 336 return 0;
334} 337}
338#endif /* CONFIG_DEVMEM */
335 339
336#ifdef CONFIG_DEVKMEM 340#ifdef CONFIG_DEVKMEM
337static int mmap_kmem(struct file *file, struct vm_area_struct *vma) 341static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -696,6 +700,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
696 return file->f_pos = 0; 700 return file->f_pos = 0;
697} 701}
698 702
703#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
704
699/* 705/*
700 * The memory devices use the full 32/64 bits of the offset, and so we cannot 706 * The memory devices use the full 32/64 bits of the offset, and so we cannot
701 * check against negative addresses: they are ok. The return value is weird, 707 * check against negative addresses: they are ok. The return value is weird,
@@ -729,10 +735,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
729 return ret; 735 return ret;
730} 736}
731 737
738#endif
739
740#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
732static int open_port(struct inode * inode, struct file * filp) 741static int open_port(struct inode * inode, struct file * filp)
733{ 742{
734 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; 743 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
735} 744}
745#endif
736 746
737#define zero_lseek null_lseek 747#define zero_lseek null_lseek
738#define full_lseek null_lseek 748#define full_lseek null_lseek
@@ -742,6 +752,7 @@ static int open_port(struct inode * inode, struct file * filp)
742#define open_kmem open_mem 752#define open_kmem open_mem
743#define open_oldmem open_mem 753#define open_oldmem open_mem
744 754
755#ifdef CONFIG_DEVMEM
745static const struct file_operations mem_fops = { 756static const struct file_operations mem_fops = {
746 .llseek = memory_lseek, 757 .llseek = memory_lseek,
747 .read = read_mem, 758 .read = read_mem,
@@ -750,6 +761,7 @@ static const struct file_operations mem_fops = {
750 .open = open_mem, 761 .open = open_mem,
751 .get_unmapped_area = get_unmapped_area_mem, 762 .get_unmapped_area = get_unmapped_area_mem,
752}; 763};
764#endif
753 765
754#ifdef CONFIG_DEVKMEM 766#ifdef CONFIG_DEVKMEM
755static const struct file_operations kmem_fops = { 767static const struct file_operations kmem_fops = {
@@ -809,13 +821,53 @@ static const struct file_operations oldmem_fops = {
809}; 821};
810#endif 822#endif
811 823
824static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
825 unsigned long count, loff_t pos)
826{
827 char *line, *p;
828 int i;
829 ssize_t ret = -EFAULT;
830 size_t len = iov_length(iv, count);
831
832 line = kmalloc(len + 1, GFP_KERNEL);
833 if (line == NULL)
834 return -ENOMEM;
835
836 /*
837 * copy all vectors into a single string, to ensure we do
838 * not interleave our log line with other printk calls
839 */
840 p = line;
841 for (i = 0; i < count; i++) {
842 if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len))
843 goto out;
844 p += iv[i].iov_len;
845 }
846 p[0] = '\0';
847
848 ret = printk("%s", line);
849 /* printk can add a prefix */
850 if (ret > len)
851 ret = len;
852out:
853 kfree(line);
854 return ret;
855}
856
857static const struct file_operations kmsg_fops = {
858 .aio_write = kmsg_writev,
859 .llseek = noop_llseek,
860};
861
812static const struct memdev { 862static const struct memdev {
813 const char *name; 863 const char *name;
814 umode_t mode; 864 mode_t mode;
815 const struct file_operations *fops; 865 const struct file_operations *fops;
816 struct backing_dev_info *dev_info; 866 struct backing_dev_info *dev_info;
817} devlist[] = { 867} devlist[] = {
868#ifdef CONFIG_DEVMEM
818 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, 869 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
870#endif
819#ifdef CONFIG_DEVKMEM 871#ifdef CONFIG_DEVKMEM
820 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, 872 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
821#endif 873#endif
@@ -827,9 +879,7 @@ static const struct memdev {
827 [7] = { "full", 0666, &full_fops, NULL }, 879 [7] = { "full", 0666, &full_fops, NULL },
828 [8] = { "random", 0666, &random_fops, NULL }, 880 [8] = { "random", 0666, &random_fops, NULL },
829 [9] = { "urandom", 0666, &urandom_fops, NULL }, 881 [9] = { "urandom", 0666, &urandom_fops, NULL },
830#ifdef CONFIG_PRINTK 882 [11] = { "kmsg", 0, &kmsg_fops, NULL },
831 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
832#endif
833#ifdef CONFIG_CRASH_DUMP 883#ifdef CONFIG_CRASH_DUMP
834 [12] = { "oldmem", 0, &oldmem_fops, NULL }, 884 [12] = { "oldmem", 0, &oldmem_fops, NULL },
835#endif 885#endif
@@ -867,7 +917,7 @@ static const struct file_operations memory_fops = {
867 .llseek = noop_llseek, 917 .llseek = noop_llseek,
868}; 918};
869 919
870static char *mem_devnode(struct device *dev, umode_t *mode) 920static char *mem_devnode(struct device *dev, mode_t *mode)
871{ 921{
872 if (mode && devlist[MINOR(dev->devt)].mode) 922 if (mode && devlist[MINOR(dev->devt)].mode)
873 *mode = devlist[MINOR(dev->devt)].mode; 923 *mode = devlist[MINOR(dev->devt)].mode;
@@ -896,13 +946,6 @@ static int __init chr_dev_init(void)
896 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { 946 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
897 if (!devlist[minor].name) 947 if (!devlist[minor].name)
898 continue; 948 continue;
899
900 /*
901 * Create /dev/port?
902 */
903 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
904 continue;
905
906 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), 949 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
907 NULL, devlist[minor].name); 950 NULL, devlist[minor].name);
908 } 951 }
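
Note: the mem.c hunks above add a write-only /dev/kmsg implementation (kmsg_writev) that copies every iovec into one kernel buffer before calling printk(), so a single logical line cannot be interleaved with other log output. A user-space illustration of the case this serves — emitting one line as a scatter/gather write — assuming the process has permission to open /dev/kmsg:

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = (void *)"example: ",        .iov_len = 9  },
		{ .iov_base = (void *)"hello from user\n", .iov_len = 16 },
	};
	int fd = open("/dev/kmsg", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/kmsg");
		return 1;
	}
	/* both pieces land in the log as one line */
	if (writev(fd, iov, 2) < 0)
		perror("writev");
	close(fd);
	return 0;
}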
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 522136d4084..778273c9324 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -258,7 +258,7 @@ int misc_deregister(struct miscdevice *misc)
258EXPORT_SYMBOL(misc_register); 258EXPORT_SYMBOL(misc_register);
259EXPORT_SYMBOL(misc_deregister); 259EXPORT_SYMBOL(misc_deregister);
260 260
261static char *misc_devnode(struct device *dev, umode_t *mode) 261static char *misc_devnode(struct device *dev, mode_t *mode)
262{ 262{
263 struct miscdevice *c = dev_get_drvdata(dev); 263 struct miscdevice *c = dev_get_drvdata(dev);
264 264
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 3d6c0671e99..33dc2298af7 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -826,7 +826,7 @@ static int __init mmtimer_init(void)
826 826
827 /* Allocate list of node ptrs to mmtimer_t's */ 827 /* Allocate list of node ptrs to mmtimer_t's */
828 timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); 828 timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
829 if (!timers) { 829 if (timers == NULL) {
830 printk(KERN_ERR "%s: failed to allocate memory for device\n", 830 printk(KERN_ERR "%s: failed to allocate memory for device\n",
831 MMTIMER_NAME); 831 MMTIMER_NAME);
832 goto out3; 832 goto out3;
@@ -848,6 +848,7 @@ static int __init mmtimer_init(void)
848 return 0; 848 return 0;
849 849
850out3: 850out3:
851 kfree(timers);
851 misc_deregister(&mmtimer_miscdev); 852 misc_deregister(&mmtimer_miscdev);
852out2: 853out2:
853 free_irq(SGI_MMTIMER_VECTOR, NULL); 854 free_irq(SGI_MMTIMER_VECTOR, NULL);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index e1f60f968fd..5c0d96a820f 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -44,6 +44,7 @@
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/numa.h> 45#include <linux/numa.h>
46#include <asm/page.h> 46#include <asm/page.h>
47#include <asm/system.h>
47#include <asm/pgtable.h> 48#include <asm/pgtable.h>
48#include <linux/atomic.h> 49#include <linux/atomic.h>
49#include <asm/tlbflush.h> 50#include <asm/tlbflush.h>
@@ -283,10 +284,10 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
283 vdata->flags = flags; 284 vdata->flags = flags;
284 vdata->type = type; 285 vdata->type = type;
285 spin_lock_init(&vdata->lock); 286 spin_lock_init(&vdata->lock);
286 atomic_set(&vdata->refcnt, 1); 287 vdata->refcnt = ATOMIC_INIT(1);
287 vma->vm_private_data = vdata; 288 vma->vm_private_data = vdata;
288 289
289 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 290 vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
290 if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED) 291 if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
291 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 292 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
292 vma->vm_ops = &mspec_vm_ops; 293 vma->vm_ops = &mspec_vm_ops;
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 881c9e59593..492dbfb2efd 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -56,6 +56,7 @@
56 56
57#include <asm/io.h> 57#include <asm/io.h>
58#include <asm/uaccess.h> 58#include <asm/uaccess.h>
59#include <asm/system.h>
59#include <asm/irq.h> 60#include <asm/irq.h>
60#include "smapi.h" 61#include "smapi.h"
61#include "mwavedd.h" 62#include "mwavedd.h"
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 164544afd68..1d82d5838f0 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -430,7 +430,7 @@ static ssize_t mwave_write(struct file *file, const char __user *buf,
430 430
431static int register_serial_portandirq(unsigned int port, int irq) 431static int register_serial_portandirq(unsigned int port, int irq)
432{ 432{
433 struct uart_8250_port uart; 433 struct uart_port uart;
434 434
435 switch ( port ) { 435 switch ( port ) {
436 case 0x3f8: 436 case 0x3f8:
@@ -462,14 +462,14 @@ static int register_serial_portandirq(unsigned int port, int irq)
462 } /* switch */ 462 } /* switch */
463 /* irq is okay */ 463 /* irq is okay */
464 464
465 memset(&uart, 0, sizeof(uart)); 465 memset(&uart, 0, sizeof(struct uart_port));
466 466
467 uart.port.uartclk = 1843200; 467 uart.uartclk = 1843200;
468 uart.port.iobase = port; 468 uart.iobase = port;
469 uart.port.irq = irq; 469 uart.irq = irq;
470 uart.port.iotype = UPIO_PORT; 470 uart.iotype = UPIO_PORT;
471 uart.port.flags = UPF_SHARE_IRQ; 471 uart.flags = UPF_SHARE_IRQ;
472 return serial8250_register_8250_port(&uart); 472 return serial8250_register_port(&uart);
473} 473}
474 474
475 475
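
Note: the mwavedd hunk above switches back to registering the UART through serial8250_register_port() with a plain struct uart_port instead of the serial8250_register_8250_port()/struct uart_8250_port pair. A small sketch of that older call (the I/O port, IRQ, and clock below are placeholders):

#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/string.h>

static int example_register_uart(void)
{
	struct uart_port uart;

	memset(&uart, 0, sizeof(uart));
	uart.uartclk = 1843200;		/* 1.8432 MHz reference clock */
	uart.iobase  = 0x3f8;		/* COM1-style port, example only */
	uart.irq     = 4;
	uart.iotype  = UPIO_PORT;
	uart.flags   = UPF_SHARE_IRQ;

	/* returns the allocated line number, or a negative errno */
	return serial8250_register_port(&uart);
}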
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 9df78e2cc45..da3cfee782d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -94,7 +94,7 @@
94/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with 94/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
95 * rtc_lock held. Due to the index-port/data-port design of the RTC, we 95 * rtc_lock held. Due to the index-port/data-port design of the RTC, we
96 * don't want two different things trying to get to it at once. (e.g. the 96 * don't want two different things trying to get to it at once. (e.g. the
97 * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) 97 * periodic 11 min sync from time.c vs. this driver.)
98 */ 98 */
99 99
100#include <linux/types.h> 100#include <linux/types.h>
@@ -111,6 +111,7 @@
111#include <linux/uaccess.h> 111#include <linux/uaccess.h>
112#include <linux/mutex.h> 112#include <linux/mutex.h>
113 113
114#include <asm/system.h>
114 115
115static DEFINE_MUTEX(nvram_mutex); 116static DEFINE_MUTEX(nvram_mutex);
116static DEFINE_SPINLOCK(nvram_state_lock); 117static DEFINE_SPINLOCK(nvram_state_lock);
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index cfdfe493c6a..04a480f86c6 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -93,9 +93,9 @@ int button_del_callback (void (*callback) (void))
93 button_callback_list [lp].count = 0; 93 button_callback_list [lp].count = 0;
94 callback_count--; 94 callback_count--;
95 return 0; 95 return 0;
96 } 96 };
97 lp--; 97 lp--;
98 } 98 };
99 return -EINVAL; 99 return -EINVAL;
100} 100}
101 101
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index e371480d363..a12f52400db 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -30,7 +30,9 @@
30 30
31#include <asm/hardware/dec21285.h> 31#include <asm/hardware/dec21285.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/leds.h>
33#include <asm/mach-types.h> 34#include <asm/mach-types.h>
35#include <asm/system.h>
34#include <asm/uaccess.h> 36#include <asm/uaccess.h>
35 37
36/*****************************************************************************/ 38/*****************************************************************************/
@@ -49,7 +51,7 @@ static int write_block(unsigned long p, const char __user *buf, int count);
49#define KFLASH_ID 0x89A6 //Intel flash 51#define KFLASH_ID 0x89A6 //Intel flash
50#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg 52#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg
51 53
52static bool flashdebug; //if set - we will display progress msgs 54static int flashdebug; //if set - we will display progress msgs
53 55
54static int gbWriteEnable; 56static int gbWriteEnable;
55static int gbWriteBase64Enable; 57static int gbWriteBase64Enable;
@@ -178,6 +180,9 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
178 180
179 written = 0; 181 written = 0;
180 182
183 leds_event(led_claim);
184 leds_event(led_green_on);
185
181 nBlock = (int) p >> 16; //block # of 64K bytes 186 nBlock = (int) p >> 16; //block # of 64K bytes
182 187
183 /* 188 /*
@@ -254,6 +259,11 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
254 printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written); 259 printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written);
255 } 260 }
256 261
262 /*
263 * restore reg on exit
264 */
265 leds_event(led_release);
266
257 mutex_unlock(&nwflash_mutex); 267 mutex_unlock(&nwflash_mutex);
258 268
259 return written; 269 return written;
@@ -325,6 +335,11 @@ static int erase_block(int nBlock)
325 int temp, temp1; 335 int temp, temp1;
326 336
327 /* 337 /*
338 * orange LED == erase
339 */
340 leds_event(led_amber_on);
341
342 /*
328 * reset footbridge to the correct offset 0 (...0..3) 343 * reset footbridge to the correct offset 0 (...0..3)
329 */ 344 */
330 *CSR_ROMWRITEREG = 0; 345 *CSR_ROMWRITEREG = 0;
@@ -432,6 +447,12 @@ static int write_block(unsigned long p, const char __user *buf, int count)
432 unsigned long timeout; 447 unsigned long timeout;
433 unsigned long timeout1; 448 unsigned long timeout1;
434 449
450 /*
451 * red LED == write
452 */
453 leds_event(led_amber_off);
454 leds_event(led_red_on);
455
435 pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); 456 pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
436 457
437 /* 458 /*
@@ -538,9 +559,17 @@ static int write_block(unsigned long p, const char __user *buf, int count)
538 pWritePtr - FLASH_BASE); 559 pWritePtr - FLASH_BASE);
539 560
540 /* 561 /*
562 * no LED == waiting
563 */
564 leds_event(led_amber_off);
565 /*
541 * wait couple ms 566 * wait couple ms
542 */ 567 */
543 msleep(10); 568 msleep(10);
569 /*
570 * red LED == write
571 */
572 leds_event(led_red_on);
544 573
545 goto WriteRetry; 574 goto WriteRetry;
546 } else { 575 } else {
@@ -555,6 +584,12 @@ static int write_block(unsigned long p, const char __user *buf, int count)
555 } 584 }
556 } 585 }
557 586
587 /*
588 * green LED == read/verify
589 */
590 leds_event(led_amber_off);
591 leds_event(led_green_on);
592
558 msleep(10); 593 msleep(10);
559 594
560 pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); 595 pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
@@ -583,9 +618,9 @@ static void kick_open(void)
583 * we want to write a bit pattern XXX1 to Xilinx to enable 618 * we want to write a bit pattern XXX1 to Xilinx to enable
584 * the write gate, which will be open for about the next 2ms. 619 * the write gate, which will be open for about the next 2ms.
585 */ 620 */
586 raw_spin_lock_irqsave(&nw_gpio_lock, flags); 621 spin_lock_irqsave(&nw_gpio_lock, flags);
587 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); 622 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
588 raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); 623 spin_unlock_irqrestore(&nw_gpio_lock, flags);
589 624
590 /* 625 /*
591 * let the ISA bus to catch on... 626 * let the ISA bus to catch on...
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 3f79a9fb6b1..b304ec05250 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -345,7 +345,8 @@ static void __exit pc8736x_gpio_cleanup(void)
345 unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT); 345 unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
346 release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE); 346 release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
347 347
348 platform_device_unregister(pdev); 348 platform_device_del(pdev);
349 platform_device_put(pdev);
349} 350}
350 351
351module_init(pc8736x_gpio_init); 352module_init(pc8736x_gpio_init);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index b66eaa04f8c..15781396af2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -60,6 +60,7 @@
60#include <linux/ioctl.h> 60#include <linux/ioctl.h>
61#include <linux/synclink.h> 61#include <linux/synclink.h>
62 62
63#include <asm/system.h>
63#include <asm/io.h> 64#include <asm/io.h>
64#include <asm/irq.h> 65#include <asm/irq.h>
65#include <asm/dma.h> 66#include <asm/dma.h>
@@ -438,7 +439,7 @@ static int mgslpc_device_count = 0;
438 * .text section address and breakpoint on module load. 439 * .text section address and breakpoint on module load.
439 * This is useful for use with gdb and add-symbol-file command. 440 * This is useful for use with gdb and add-symbol-file command.
440 */ 441 */
441static bool break_on_load=0; 442static int break_on_load=0;
442 443
443/* 444/*
444 * Driver major number, defaults to zero to get auto 445 * Driver major number, defaults to zero to get auto
@@ -549,10 +550,8 @@ static int mgslpc_probe(struct pcmcia_device *link)
549 /* Initialize the struct pcmcia_device structure */ 550 /* Initialize the struct pcmcia_device structure */
550 551
551 ret = mgslpc_config(link); 552 ret = mgslpc_config(link);
552 if (ret) { 553 if (ret)
553 tty_port_destroy(&info->port);
554 return ret; 554 return ret;
555 }
556 555
557 mgslpc_add_device(info); 556 mgslpc_add_device(info);
558 557
@@ -893,14 +892,6 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
893 int work = 0; 892 int work = 0;
894 struct mgsl_icount *icount = &info->icount; 893 struct mgsl_icount *icount = &info->icount;
895 894
896 if (!tty) {
897 /* tty is not available anymore */
898 issue_command(info, CHA, CMD_RXRESET);
899 if (debug_level >= DEBUG_LEVEL_ISR)
900 printk("%s(%d):rx_ready_async(tty=NULL)\n",__FILE__,__LINE__);
901 return;
902 }
903
904 if (tcd) { 895 if (tcd) {
905 /* early termination, get FIFO count from RBCL register */ 896 /* early termination, get FIFO count from RBCL register */
906 fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f); 897 fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
@@ -990,7 +981,7 @@ static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
990 else 981 else
991#endif 982#endif
992 { 983 {
993 if (tty && (tty->stopped || tty->hw_stopped)) { 984 if (tty->stopped || tty->hw_stopped) {
994 tx_stop(info); 985 tx_stop(info);
995 return; 986 return;
996 } 987 }
@@ -1010,7 +1001,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
1010 if (!info->tx_active) 1001 if (!info->tx_active)
1011 return; 1002 return;
1012 } else { 1003 } else {
1013 if (tty && (tty->stopped || tty->hw_stopped)) { 1004 if (tty->stopped || tty->hw_stopped) {
1014 tx_stop(info); 1005 tx_stop(info);
1015 return; 1006 return;
1016 } 1007 }
@@ -1060,12 +1051,13 @@ static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty)
1060 wake_up_interruptible(&info->status_event_wait_q); 1051 wake_up_interruptible(&info->status_event_wait_q);
1061 wake_up_interruptible(&info->event_wait_q); 1052 wake_up_interruptible(&info->event_wait_q);
1062 1053
1063 if (tty && tty_port_cts_enabled(&info->port)) { 1054 if (info->port.flags & ASYNC_CTS_FLOW) {
1064 if (tty->hw_stopped) { 1055 if (tty->hw_stopped) {
1065 if (info->serial_signals & SerialSignal_CTS) { 1056 if (info->serial_signals & SerialSignal_CTS) {
1066 if (debug_level >= DEBUG_LEVEL_ISR) 1057 if (debug_level >= DEBUG_LEVEL_ISR)
1067 printk("CTS tx start..."); 1058 printk("CTS tx start...");
1068 tty->hw_stopped = 0; 1059 if (tty)
1060 tty->hw_stopped = 0;
1069 tx_start(info, tty); 1061 tx_start(info, tty);
1070 info->pending_bh |= BH_TRANSMIT; 1062 info->pending_bh |= BH_TRANSMIT;
1071 return; 1063 return;
@@ -1074,7 +1066,8 @@ static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty)
1074 if (!(info->serial_signals & SerialSignal_CTS)) { 1066 if (!(info->serial_signals & SerialSignal_CTS)) {
1075 if (debug_level >= DEBUG_LEVEL_ISR) 1067 if (debug_level >= DEBUG_LEVEL_ISR)
1076 printk("CTS tx stop..."); 1068 printk("CTS tx stop...");
1077 tty->hw_stopped = 1; 1069 if (tty)
1070 tty->hw_stopped = 1;
1078 tx_stop(info); 1071 tx_stop(info);
1079 } 1072 }
1080 } 1073 }
@@ -1352,7 +1345,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
1352 /* TODO:disable interrupts instead of reset to preserve signal states */ 1345 /* TODO:disable interrupts instead of reset to preserve signal states */
1353 reset_device(info); 1346 reset_device(info);
1354 1347
1355 if (!tty || tty->termios.c_cflag & HUPCL) { 1348 if (!tty || tty->termios->c_cflag & HUPCL) {
1356 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); 1349 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1357 set_signals(info); 1350 set_signals(info);
1358 } 1351 }
@@ -1393,7 +1386,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
1393 port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI); 1386 port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
1394 get_signals(info); 1387 get_signals(info);
1395 1388
1396 if (info->netcount || (tty && (tty->termios.c_cflag & CREAD))) 1389 if (info->netcount || (tty && (tty->termios->c_cflag & CREAD)))
1397 rx_start(info); 1390 rx_start(info);
1398 1391
1399 spin_unlock_irqrestore(&info->lock,flags); 1392 spin_unlock_irqrestore(&info->lock,flags);
@@ -1406,14 +1399,14 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
1406 unsigned cflag; 1399 unsigned cflag;
1407 int bits_per_char; 1400 int bits_per_char;
1408 1401
1409 if (!tty) 1402 if (!tty || !tty->termios)
1410 return; 1403 return;
1411 1404
1412 if (debug_level >= DEBUG_LEVEL_INFO) 1405 if (debug_level >= DEBUG_LEVEL_INFO)
1413 printk("%s(%d):mgslpc_change_params(%s)\n", 1406 printk("%s(%d):mgslpc_change_params(%s)\n",
1414 __FILE__,__LINE__, info->device_name ); 1407 __FILE__,__LINE__, info->device_name );
1415 1408
1416 cflag = tty->termios.c_cflag; 1409 cflag = tty->termios->c_cflag;
1417 1410
1418 /* if B0 rate (hangup) specified then negate DTR and RTS */ 1411 /* if B0 rate (hangup) specified then negate DTR and RTS */
1419 /* otherwise assert DTR and RTS */ 1412 /* otherwise assert DTR and RTS */
@@ -1736,7 +1729,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
1736 if (I_IXOFF(tty)) 1729 if (I_IXOFF(tty))
1737 mgslpc_send_xchar(tty, STOP_CHAR(tty)); 1730 mgslpc_send_xchar(tty, STOP_CHAR(tty));
1738 1731
1739 if (tty->termios.c_cflag & CRTSCTS) { 1732 if (tty->termios->c_cflag & CRTSCTS) {
1740 spin_lock_irqsave(&info->lock,flags); 1733 spin_lock_irqsave(&info->lock,flags);
1741 info->serial_signals &= ~SerialSignal_RTS; 1734 info->serial_signals &= ~SerialSignal_RTS;
1742 set_signals(info); 1735 set_signals(info);
@@ -1765,7 +1758,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
1765 mgslpc_send_xchar(tty, START_CHAR(tty)); 1758 mgslpc_send_xchar(tty, START_CHAR(tty));
1766 } 1759 }
1767 1760
1768 if (tty->termios.c_cflag & CRTSCTS) { 1761 if (tty->termios->c_cflag & CRTSCTS) {
1769 spin_lock_irqsave(&info->lock,flags); 1762 spin_lock_irqsave(&info->lock,flags);
1770 info->serial_signals |= SerialSignal_RTS; 1763 info->serial_signals |= SerialSignal_RTS;
1771 set_signals(info); 1764 set_signals(info);
@@ -2301,8 +2294,8 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
2301 tty->driver->name ); 2294 tty->driver->name );
2302 2295
2303 /* just return if nothing has changed */ 2296 /* just return if nothing has changed */
2304 if ((tty->termios.c_cflag == old_termios->c_cflag) 2297 if ((tty->termios->c_cflag == old_termios->c_cflag)
2305 && (RELEVANT_IFLAG(tty->termios.c_iflag) 2298 && (RELEVANT_IFLAG(tty->termios->c_iflag)
2306 == RELEVANT_IFLAG(old_termios->c_iflag))) 2299 == RELEVANT_IFLAG(old_termios->c_iflag)))
2307 return; 2300 return;
2308 2301
@@ -2310,7 +2303,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
2310 2303
2311 /* Handle transition to B0 status */ 2304 /* Handle transition to B0 status */
2312 if (old_termios->c_cflag & CBAUD && 2305 if (old_termios->c_cflag & CBAUD &&
2313 !(tty->termios.c_cflag & CBAUD)) { 2306 !(tty->termios->c_cflag & CBAUD)) {
2314 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); 2307 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
2315 spin_lock_irqsave(&info->lock,flags); 2308 spin_lock_irqsave(&info->lock,flags);
2316 set_signals(info); 2309 set_signals(info);
@@ -2319,9 +2312,9 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
2319 2312
2320 /* Handle transition away from B0 status */ 2313 /* Handle transition away from B0 status */
2321 if (!(old_termios->c_cflag & CBAUD) && 2314 if (!(old_termios->c_cflag & CBAUD) &&
2322 tty->termios.c_cflag & CBAUD) { 2315 tty->termios->c_cflag & CBAUD) {
2323 info->serial_signals |= SerialSignal_DTR; 2316 info->serial_signals |= SerialSignal_DTR;
2324 if (!(tty->termios.c_cflag & CRTSCTS) || 2317 if (!(tty->termios->c_cflag & CRTSCTS) ||
2325 !test_bit(TTY_THROTTLED, &tty->flags)) { 2318 !test_bit(TTY_THROTTLED, &tty->flags)) {
2326 info->serial_signals |= SerialSignal_RTS; 2319 info->serial_signals |= SerialSignal_RTS;
2327 } 2320 }
@@ -2332,7 +2325,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
2332 2325
2333 /* Handle turning off CRTSCTS */ 2326 /* Handle turning off CRTSCTS */
2334 if (old_termios->c_cflag & CRTSCTS && 2327 if (old_termios->c_cflag & CRTSCTS &&
2335 !(tty->termios.c_cflag & CRTSCTS)) { 2328 !(tty->termios->c_cflag & CRTSCTS)) {
2336 tty->hw_stopped = 0; 2329 tty->hw_stopped = 0;
2337 tx_release(tty); 2330 tx_release(tty);
2338 } 2331 }
@@ -2491,7 +2484,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
2491 2484
2492 /* verify range of specified line number */ 2485 /* verify range of specified line number */
2493 line = tty->index; 2486 line = tty->index;
2494 if (line >= mgslpc_device_count) { 2487 if ((line < 0) || (line >= mgslpc_device_count)) {
2495 printk("%s(%d):mgslpc_open with invalid line #%d.\n", 2488 printk("%s(%d):mgslpc_open with invalid line #%d.\n",
2496 __FILE__,__LINE__,line); 2489 __FILE__,__LINE__,line);
2497 return -ENODEV; 2490 return -ENODEV;
@@ -2739,8 +2732,6 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
2739#if SYNCLINK_GENERIC_HDLC 2732#if SYNCLINK_GENERIC_HDLC
2740 hdlcdev_init(info); 2733 hdlcdev_init(info);
2741#endif 2734#endif
2742 tty_port_register_device(&info->port, serial_driver, info->line,
2743 &info->p_dev->dev);
2744} 2735}
2745 2736
2746static void mgslpc_remove_device(MGSLPC_INFO *remove_info) 2737static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
@@ -2754,12 +2745,10 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
2754 last->next_device = info->next_device; 2745 last->next_device = info->next_device;
2755 else 2746 else
2756 mgslpc_device_list = info->next_device; 2747 mgslpc_device_list = info->next_device;
2757 tty_unregister_device(serial_driver, info->line);
2758#if SYNCLINK_GENERIC_HDLC 2748#if SYNCLINK_GENERIC_HDLC
2759 hdlcdev_exit(info); 2749 hdlcdev_exit(info);
2760#endif 2750#endif
2761 release_resources(info); 2751 release_resources(info);
2762 tty_port_destroy(&info->port);
2763 kfree(info); 2752 kfree(info);
2764 mgslpc_device_count--; 2753 mgslpc_device_count--;
2765 return; 2754 return;
@@ -2810,63 +2799,78 @@ static const struct tty_operations mgslpc_ops = {
2810 .proc_fops = &mgslpc_proc_fops, 2799 .proc_fops = &mgslpc_proc_fops,
2811}; 2800};
2812 2801
2813static int __init synclink_cs_init(void) 2802static void synclink_cs_cleanup(void)
2814{ 2803{
2815 int rc; 2804 int rc;
2816 2805
2817 if (break_on_load) { 2806 while(mgslpc_device_list)
2818 mgslpc_get_text_ptr(); 2807 mgslpc_remove_device(mgslpc_device_list);
2819 BREAKPOINT();
2820 }
2821 2808
2822 serial_driver = tty_alloc_driver(MAX_DEVICE_COUNT, 2809 if (serial_driver) {
2823 TTY_DRIVER_REAL_RAW | 2810 if ((rc = tty_unregister_driver(serial_driver)))
2824 TTY_DRIVER_DYNAMIC_DEV); 2811 printk("%s(%d) failed to unregister tty driver err=%d\n",
2825 if (IS_ERR(serial_driver)) { 2812 __FILE__,__LINE__,rc);
2826 rc = PTR_ERR(serial_driver); 2813 put_tty_driver(serial_driver);
2827 goto err;
2828 } 2814 }
2829 2815
2830 /* Initialize the tty_driver structure */ 2816 pcmcia_unregister_driver(&mgslpc_driver);
2831 serial_driver->driver_name = "synclink_cs"; 2817}
2832 serial_driver->name = "ttySLP";
2833 serial_driver->major = ttymajor;
2834 serial_driver->minor_start = 64;
2835 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
2836 serial_driver->subtype = SERIAL_TYPE_NORMAL;
2837 serial_driver->init_termios = tty_std_termios;
2838 serial_driver->init_termios.c_cflag =
2839 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
2840 tty_set_operations(serial_driver, &mgslpc_ops);
2841
2842 rc = tty_register_driver(serial_driver);
2843 if (rc < 0) {
2844 printk(KERN_ERR "%s(%d):Couldn't register serial driver\n",
2845 __FILE__, __LINE__);
2846 goto err_put_tty;
2847 }
2848 2818
2849 rc = pcmcia_register_driver(&mgslpc_driver); 2819static int __init synclink_cs_init(void)
2850 if (rc < 0) 2820{
2851 goto err_unreg_tty; 2821 int rc;
2852 2822
2853 printk(KERN_INFO "%s %s, tty major#%d\n", driver_name, driver_version, 2823 if (break_on_load) {
2854 serial_driver->major); 2824 mgslpc_get_text_ptr();
2825 BREAKPOINT();
2826 }
2855 2827
2856 return 0; 2828 if ((rc = pcmcia_register_driver(&mgslpc_driver)) < 0)
2857err_unreg_tty: 2829 return rc;
2858 tty_unregister_driver(serial_driver); 2830
2859err_put_tty: 2831 serial_driver = alloc_tty_driver(MAX_DEVICE_COUNT);
2860 put_tty_driver(serial_driver); 2832 if (!serial_driver) {
2861err: 2833 rc = -ENOMEM;
2862 return rc; 2834 goto error;
2835 }
2836
2837 /* Initialize the tty_driver structure */
2838
2839 serial_driver->owner = THIS_MODULE;
2840 serial_driver->driver_name = "synclink_cs";
2841 serial_driver->name = "ttySLP";
2842 serial_driver->major = ttymajor;
2843 serial_driver->minor_start = 64;
2844 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
2845 serial_driver->subtype = SERIAL_TYPE_NORMAL;
2846 serial_driver->init_termios = tty_std_termios;
2847 serial_driver->init_termios.c_cflag =
2848 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
2849 serial_driver->flags = TTY_DRIVER_REAL_RAW;
2850 tty_set_operations(serial_driver, &mgslpc_ops);
2851
2852 if ((rc = tty_register_driver(serial_driver)) < 0) {
2853 printk("%s(%d):Couldn't register serial driver\n",
2854 __FILE__,__LINE__);
2855 put_tty_driver(serial_driver);
2856 serial_driver = NULL;
2857 goto error;
2858 }
2859
2860 printk("%s %s, tty major#%d\n",
2861 driver_name, driver_version,
2862 serial_driver->major);
2863
2864 return 0;
2865
2866error:
2867 synclink_cs_cleanup();
2868 return rc;
2863} 2869}
2864 2870
2865static void __exit synclink_cs_exit(void) 2871static void __exit synclink_cs_exit(void)
2866{ 2872{
2867 pcmcia_unregister_driver(&mgslpc_driver); 2873 synclink_cs_cleanup();
2868 tty_unregister_driver(serial_driver);
2869 put_tty_driver(serial_driver);
2870} 2874}
2871 2875
2872module_init(synclink_cs_init); 2876module_init(synclink_cs_init);
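
Both variants of synclink_cs_init() visible in this hunk solve the same problem, releasing only what was actually acquired when a later registration step fails: one with labeled goto unwinding, the other by funnelling every failure through synclink_cs_cleanup(), which tests each pointer before touching it. A minimal user-space sketch of the goto-unwind shape (the resources here are plain allocations standing in for the driver's registrations, not code from the driver):

#include <stdio.h>
#include <stdlib.h>

/* Acquire two "resources" in order; on failure, undo only what was taken. */
static int demo_init(void)
{
        char *tty_buf, *pcmcia_buf;
        int rc;

        tty_buf = malloc(64);           /* stands in for the tty driver allocation */
        if (!tty_buf) {
                rc = -1;
                goto err;               /* nothing to undo yet */
        }

        pcmcia_buf = malloc(64);        /* stands in for pcmcia_register_driver() */
        if (!pcmcia_buf) {
                rc = -1;
                goto err_free_tty;      /* undo only the first step */
        }

        printf("init ok\n");
        free(pcmcia_buf);
        free(tty_buf);
        return 0;

err_free_tty:
        free(tty_buf);
err:
        return rc;
}

int main(void)
{
        return demo_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The single-cleanup variant trades this for a helper that must be safe to call with partially initialised state, which is why it checks serial_driver for NULL before unregistering it.
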
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 1cd49241e60..3fcf80ff12f 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -251,8 +251,12 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
251 break; 251 break;
252 } 252 }
253 253
254 if (signal_pending (current)) 254 if (signal_pending (current)) {
255 if (!bytes_written) {
256 bytes_written = -EINTR;
257 }
255 break; 258 break;
259 }
256 260
257 cond_resched(); 261 cond_resched();
258 } 262 }
@@ -779,8 +783,7 @@ static int __init ppdev_init (void)
779 err = PTR_ERR(ppdev_class); 783 err = PTR_ERR(ppdev_class);
780 goto out_chrdev; 784 goto out_chrdev;
781 } 785 }
782 err = parport_register_driver(&pp_driver); 786 if (parport_register_driver(&pp_driver)) {
783 if (err < 0) {
784 printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); 787 printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
785 goto out_class; 788 goto out_class;
786 } 789 }
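
One side of the pp_write() hunk adds the usual convention for interruptible writes: a pending signal becomes -EINTR only when nothing has been transferred yet, otherwise the partial byte count is returned. A small stand-alone sketch of that convention; the interrupted flag below stands in for the kernel's signal_pending(current) check and everything else is invented for illustration:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t interrupted;

static void on_sigint(int sig)
{
        (void)sig;
        interrupted = 1;
}

/* Copy buf in small chunks; report -EINTR only if nothing was copied yet. */
static long chunked_write(char *dst, const char *buf, size_t len)
{
        long written = 0;

        while (len) {
                size_t chunk = len < 16 ? len : 16;

                if (interrupted) {
                        if (!written)
                                written = -EINTR; /* nothing done: report the signal */
                        break;                    /* otherwise hand back the partial count */
                }

                memcpy(dst + written, buf + written, chunk);
                written += chunk;
                len -= chunk;
        }
        return written;
}

int main(void)
{
        char dst[128], src[128] = "example payload";

        signal(SIGINT, on_sigint);
        printf("wrote %ld bytes\n", chunked_write(dst, src, sizeof(src)));
        return 0;
}
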
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 588063ac951..d0c57c2e290 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -22,7 +22,6 @@
22#include <linux/miscdevice.h> 22#include <linux/miscdevice.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/module.h>
26 25
27#include <asm/lv1call.h> 26#include <asm/lv1call.h>
28#include <asm/ps3stor.h> 27#include <asm/ps3stor.h>
@@ -363,7 +362,7 @@ static struct miscdevice ps3flash_misc = {
363 .fops = &ps3flash_fops, 362 .fops = &ps3flash_fops,
364}; 363};
365 364
366static int ps3flash_probe(struct ps3_system_bus_device *_dev) 365static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
367{ 366{
368 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); 367 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
369 struct ps3flash_private *priv; 368 struct ps3flash_private *priv;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 85e81ec1451..c35a785005b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,26 +125,21 @@
125 * The current exported interfaces for gathering environmental noise 125 * The current exported interfaces for gathering environmental noise
126 * from the devices are: 126 * from the devices are:
127 * 127 *
128 * void add_device_randomness(const void *buf, unsigned int size);
129 * void add_input_randomness(unsigned int type, unsigned int code, 128 * void add_input_randomness(unsigned int type, unsigned int code,
130 * unsigned int value); 129 * unsigned int value);
131 * void add_interrupt_randomness(int irq, int irq_flags); 130 * void add_interrupt_randomness(int irq);
132 * void add_disk_randomness(struct gendisk *disk); 131 * void add_disk_randomness(struct gendisk *disk);
133 * 132 *
134 * add_device_randomness() is for adding data to the random pool that
135 * is likely to differ between two devices (or possibly even per boot).
136 * This would be things like MAC addresses or serial numbers, or the
137 * read-out of the RTC. This does *not* add any actual entropy to the
138 * pool, but it initializes the pool to different values for devices
139 * that might otherwise be identical and have very little entropy
140 * available to them (particularly common in the embedded world).
141 *
142 * add_input_randomness() uses the input layer interrupt timing, as well as 133 * add_input_randomness() uses the input layer interrupt timing, as well as
143 * the event type information from the hardware. 134 * the event type information from the hardware.
144 * 135 *
145 * add_interrupt_randomness() uses the interrupt timing as random 136 * add_interrupt_randomness() uses the inter-interrupt timing as random
146 * inputs to the entropy pool. Using the cycle counters and the irq source 137 * inputs to the entropy pool. Note that not all interrupts are good
147 * as inputs, it feeds the randomness roughly once a second. 138 * sources of randomness! For example, the timer interrupts is not a
139 * good choice, because the periodicity of the interrupts is too
140 * regular, and hence predictable to an attacker. Network Interface
141 * Controller interrupts are a better measure, since the timing of the
142 * NIC interrupts are more unpredictable.
148 * 143 *
149 * add_disk_randomness() uses what amounts to the seek time of block 144 * add_disk_randomness() uses what amounts to the seek time of block
150 * layer request events, on a per-disk_devt basis, as input to the 145 * layer request events, on a per-disk_devt basis, as input to the
@@ -253,8 +248,6 @@
253#include <linux/percpu.h> 248#include <linux/percpu.h>
254#include <linux/cryptohash.h> 249#include <linux/cryptohash.h>
255#include <linux/fips.h> 250#include <linux/fips.h>
256#include <linux/ptrace.h>
257#include <linux/kmemcheck.h>
258 251
259#ifdef CONFIG_GENERIC_HARDIRQS 252#ifdef CONFIG_GENERIC_HARDIRQS
260# include <linux/irq.h> 253# include <linux/irq.h>
@@ -263,12 +256,8 @@
263#include <asm/processor.h> 256#include <asm/processor.h>
264#include <asm/uaccess.h> 257#include <asm/uaccess.h>
265#include <asm/irq.h> 258#include <asm/irq.h>
266#include <asm/irq_regs.h>
267#include <asm/io.h> 259#include <asm/io.h>
268 260
269#define CREATE_TRACE_POINTS
270#include <trace/events/random.h>
271
272/* 261/*
273 * Configuration information 262 * Configuration information
274 */ 263 */
@@ -277,8 +266,6 @@
277#define SEC_XFER_SIZE 512 266#define SEC_XFER_SIZE 512
278#define EXTRACT_SIZE 10 267#define EXTRACT_SIZE 10
279 268
280#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
281
282/* 269/*
283 * The minimum number of bits of entropy before we wake up a read on 270 * The minimum number of bits of entropy before we wake up a read on
284 * /dev/random. Should be enough to do a significant reseed. 271 * /dev/random. Should be enough to do a significant reseed.
@@ -399,7 +386,8 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
399static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); 386static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
400static struct fasync_struct *fasync; 387static struct fasync_struct *fasync;
401 388
402static bool debug; 389#if 0
390static int debug;
403module_param(debug, bool, 0644); 391module_param(debug, bool, 0644);
404#define DEBUG_ENT(fmt, arg...) do { \ 392#define DEBUG_ENT(fmt, arg...) do { \
405 if (debug) \ 393 if (debug) \
@@ -409,6 +397,9 @@ module_param(debug, bool, 0644);
409 blocking_pool.entropy_count,\ 397 blocking_pool.entropy_count,\
410 nonblocking_pool.entropy_count,\ 398 nonblocking_pool.entropy_count,\
411 ## arg); } while (0) 399 ## arg); } while (0)
400#else
401#define DEBUG_ENT(fmt, arg...) do {} while (0)
402#endif
412 403
413/********************************************************************** 404/**********************************************************************
414 * 405 *
@@ -429,11 +420,8 @@ struct entropy_store {
429 /* read-write data: */ 420 /* read-write data: */
430 spinlock_t lock; 421 spinlock_t lock;
431 unsigned add_ptr; 422 unsigned add_ptr;
432 unsigned input_rotate;
433 int entropy_count; 423 int entropy_count;
434 int entropy_total; 424 int input_rotate;
435 unsigned int initialized:1;
436 bool last_data_init;
437 __u8 last_data[EXTRACT_SIZE]; 425 __u8 last_data[EXTRACT_SIZE];
438}; 426};
439 427
@@ -466,10 +454,6 @@ static struct entropy_store nonblocking_pool = {
466 .pool = nonblocking_pool_data 454 .pool = nonblocking_pool_data
467}; 455};
468 456
469static __u32 const twist_table[8] = {
470 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
471 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
472
473/* 457/*
474 * This function adds bytes into the entropy "pool". It does not 458 * This function adds bytes into the entropy "pool". It does not
475 * update the entropy estimate. The caller should call 459 * update the entropy estimate. The caller should call
@@ -480,24 +464,29 @@ static __u32 const twist_table[8] = {
480 * it's cheap to do so and helps slightly in the expected case where 464 * it's cheap to do so and helps slightly in the expected case where
481 * the entropy is concentrated in the low-order bits. 465 * the entropy is concentrated in the low-order bits.
482 */ 466 */
483static void _mix_pool_bytes(struct entropy_store *r, const void *in, 467static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
484 int nbytes, __u8 out[64]) 468 int nbytes, __u8 out[64])
485{ 469{
470 static __u32 const twist_table[8] = {
471 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
472 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
486 unsigned long i, j, tap1, tap2, tap3, tap4, tap5; 473 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
487 int input_rotate; 474 int input_rotate;
488 int wordmask = r->poolinfo->poolwords - 1; 475 int wordmask = r->poolinfo->poolwords - 1;
489 const char *bytes = in; 476 const char *bytes = in;
490 __u32 w; 477 __u32 w;
478 unsigned long flags;
491 479
480 /* Taps are constant, so we can load them without holding r->lock. */
492 tap1 = r->poolinfo->tap1; 481 tap1 = r->poolinfo->tap1;
493 tap2 = r->poolinfo->tap2; 482 tap2 = r->poolinfo->tap2;
494 tap3 = r->poolinfo->tap3; 483 tap3 = r->poolinfo->tap3;
495 tap4 = r->poolinfo->tap4; 484 tap4 = r->poolinfo->tap4;
496 tap5 = r->poolinfo->tap5; 485 tap5 = r->poolinfo->tap5;
497 486
498 smp_rmb(); 487 spin_lock_irqsave(&r->lock, flags);
499 input_rotate = ACCESS_ONCE(r->input_rotate); 488 input_rotate = r->input_rotate;
500 i = ACCESS_ONCE(r->add_ptr); 489 i = r->add_ptr;
501 490
502 /* mix one byte at a time to simplify size handling and churn faster */ 491 /* mix one byte at a time to simplify size handling and churn faster */
503 while (nbytes--) { 492 while (nbytes--) {
@@ -524,61 +513,19 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
524 input_rotate += i ? 7 : 14; 513 input_rotate += i ? 7 : 14;
525 } 514 }
526 515
527 ACCESS_ONCE(r->input_rotate) = input_rotate; 516 r->input_rotate = input_rotate;
528 ACCESS_ONCE(r->add_ptr) = i; 517 r->add_ptr = i;
529 smp_wmb();
530 518
531 if (out) 519 if (out)
532 for (j = 0; j < 16; j++) 520 for (j = 0; j < 16; j++)
533 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; 521 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
534}
535
536static void __mix_pool_bytes(struct entropy_store *r, const void *in,
537 int nbytes, __u8 out[64])
538{
539 trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
540 _mix_pool_bytes(r, in, nbytes, out);
541}
542
543static void mix_pool_bytes(struct entropy_store *r, const void *in,
544 int nbytes, __u8 out[64])
545{
546 unsigned long flags;
547 522
548 trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
549 spin_lock_irqsave(&r->lock, flags);
550 _mix_pool_bytes(r, in, nbytes, out);
551 spin_unlock_irqrestore(&r->lock, flags); 523 spin_unlock_irqrestore(&r->lock, flags);
552} 524}
553 525
554struct fast_pool { 526static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
555 __u32 pool[4];
556 unsigned long last;
557 unsigned short count;
558 unsigned char rotate;
559 unsigned char last_timer_intr;
560};
561
562/*
563 * This is a fast mixing routine used by the interrupt randomness
564 * collector. It's hardcoded for an 128 bit pool and assumes that any
565 * locks that might be needed are taken by the caller.
566 */
567static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
568{ 527{
569 const char *bytes = in; 528 mix_pool_bytes_extract(r, in, bytes, NULL);
570 __u32 w;
571 unsigned i = f->count;
572 unsigned input_rotate = f->rotate;
573
574 while (nbytes--) {
575 w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
576 f->pool[(i + 1) & 3];
577 f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
578 input_rotate += (i++ & 3) ? 7 : 14;
579 }
580 f->count = i;
581 f->rotate = input_rotate;
582} 529}
583 530
584/* 531/*
@@ -586,38 +533,30 @@ static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
586 */ 533 */
587static void credit_entropy_bits(struct entropy_store *r, int nbits) 534static void credit_entropy_bits(struct entropy_store *r, int nbits)
588{ 535{
589 int entropy_count, orig; 536 unsigned long flags;
537 int entropy_count;
590 538
591 if (!nbits) 539 if (!nbits)
592 return; 540 return;
593 541
542 spin_lock_irqsave(&r->lock, flags);
543
594 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); 544 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
595retry: 545 entropy_count = r->entropy_count;
596 entropy_count = orig = ACCESS_ONCE(r->entropy_count);
597 entropy_count += nbits; 546 entropy_count += nbits;
598
599 if (entropy_count < 0) { 547 if (entropy_count < 0) {
600 DEBUG_ENT("negative entropy/overflow\n"); 548 DEBUG_ENT("negative entropy/overflow\n");
601 entropy_count = 0; 549 entropy_count = 0;
602 } else if (entropy_count > r->poolinfo->POOLBITS) 550 } else if (entropy_count > r->poolinfo->POOLBITS)
603 entropy_count = r->poolinfo->POOLBITS; 551 entropy_count = r->poolinfo->POOLBITS;
604 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 552 r->entropy_count = entropy_count;
605 goto retry;
606
607 if (!r->initialized && nbits > 0) {
608 r->entropy_total += nbits;
609 if (r->entropy_total > 128)
610 r->initialized = 1;
611 }
612
613 trace_credit_entropy_bits(r->name, nbits, entropy_count,
614 r->entropy_total, _RET_IP_);
615 553
616 /* should we wake readers? */ 554 /* should we wake readers? */
617 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) { 555 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
618 wake_up_interruptible(&random_read_wait); 556 wake_up_interruptible(&random_read_wait);
619 kill_fasync(&fasync, SIGIO, POLL_IN); 557 kill_fasync(&fasync, SIGIO, POLL_IN);
620 } 558 }
559 spin_unlock_irqrestore(&r->lock, flags);
621} 560}
622 561
623/********************************************************************* 562/*********************************************************************
@@ -633,24 +572,42 @@ struct timer_rand_state {
633 unsigned dont_count_entropy:1; 572 unsigned dont_count_entropy:1;
634}; 573};
635 574
636/* 575#ifndef CONFIG_GENERIC_HARDIRQS
637 * Add device- or boot-specific data to the input and nonblocking 576
638 * pools to help initialize them to unique values. 577static struct timer_rand_state *irq_timer_state[NR_IRQS];
639 * 578
640 * None of this adds any entropy, it is meant to avoid the 579static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
641 * problem of the nonblocking pool having similar initial state 580{
642 * across largely identical devices. 581 return irq_timer_state[irq];
643 */ 582}
644void add_device_randomness(const void *buf, unsigned int size) 583
584static void set_timer_rand_state(unsigned int irq,
585 struct timer_rand_state *state)
586{
587 irq_timer_state[irq] = state;
588}
589
590#else
591
592static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
645{ 593{
646 unsigned long time = get_cycles() ^ jiffies; 594 struct irq_desc *desc;
595
596 desc = irq_to_desc(irq);
647 597
648 mix_pool_bytes(&input_pool, buf, size, NULL); 598 return desc->timer_rand_state;
649 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
650 mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
651 mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
652} 599}
653EXPORT_SYMBOL(add_device_randomness); 600
601static void set_timer_rand_state(unsigned int irq,
602 struct timer_rand_state *state)
603{
604 struct irq_desc *desc;
605
606 desc = irq_to_desc(irq);
607
608 desc->timer_rand_state = state;
609}
610#endif
654 611
655static struct timer_rand_state input_timer_state; 612static struct timer_rand_state input_timer_state;
656 613
@@ -667,8 +624,8 @@ static struct timer_rand_state input_timer_state;
667static void add_timer_randomness(struct timer_rand_state *state, unsigned num) 624static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
668{ 625{
669 struct { 626 struct {
627 cycles_t cycles;
670 long jiffies; 628 long jiffies;
671 unsigned cycles;
672 unsigned num; 629 unsigned num;
673 } sample; 630 } sample;
674 long delta, delta2, delta3; 631 long delta, delta2, delta3;
@@ -682,7 +639,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
682 sample.jiffies = jiffies; 639 sample.jiffies = jiffies;
683 sample.cycles = get_cycles(); 640 sample.cycles = get_cycles();
684 sample.num = num; 641 sample.num = num;
685 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); 642 mix_pool_bytes(&input_pool, &sample, sizeof(sample));
686 643
687 /* 644 /*
688 * Calculate number of bits of randomness we probably added. 645 * Calculate number of bits of randomness we probably added.
@@ -739,48 +696,17 @@ void add_input_randomness(unsigned int type, unsigned int code,
739} 696}
740EXPORT_SYMBOL_GPL(add_input_randomness); 697EXPORT_SYMBOL_GPL(add_input_randomness);
741 698
742static DEFINE_PER_CPU(struct fast_pool, irq_randomness); 699void add_interrupt_randomness(int irq)
743
744void add_interrupt_randomness(int irq, int irq_flags)
745{ 700{
746 struct entropy_store *r; 701 struct timer_rand_state *state;
747 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
748 struct pt_regs *regs = get_irq_regs();
749 unsigned long now = jiffies;
750 __u32 input[4], cycles = get_cycles();
751
752 input[0] = cycles ^ jiffies;
753 input[1] = irq;
754 if (regs) {
755 __u64 ip = instruction_pointer(regs);
756 input[2] = ip;
757 input[3] = ip >> 32;
758 }
759 702
760 fast_mix(fast_pool, input, sizeof(input)); 703 state = get_timer_rand_state(irq);
761 704
762 if ((fast_pool->count & 1023) && 705 if (state == NULL)
763 !time_after(now, fast_pool->last + HZ))
764 return; 706 return;
765 707
766 fast_pool->last = now; 708 DEBUG_ENT("irq event %d\n", irq);
767 709 add_timer_randomness(state, 0x100 + irq);
768 r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
769 __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
770 /*
771 * If we don't have a valid cycle counter, and we see
772 * back-to-back timer interrupts, then skip giving credit for
773 * any entropy.
774 */
775 if (cycles == 0) {
776 if (irq_flags & __IRQF_TIMER) {
777 if (fast_pool->last_timer_intr)
778 return;
779 fast_pool->last_timer_intr = 1;
780 } else
781 fast_pool->last_timer_intr = 0;
782 }
783 credit_entropy_bits(r, 1);
784} 710}
785 711
786#ifdef CONFIG_BLOCK 712#ifdef CONFIG_BLOCK
@@ -812,7 +738,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
812 */ 738 */
813static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) 739static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
814{ 740{
815 __u32 tmp[OUTPUT_POOL_WORDS]; 741 __u32 tmp[OUTPUT_POOL_WORDS];
816 742
817 if (r->pull && r->entropy_count < nbytes * 8 && 743 if (r->pull && r->entropy_count < nbytes * 8 &&
818 r->entropy_count < r->poolinfo->POOLBITS) { 744 r->entropy_count < r->poolinfo->POOLBITS) {
@@ -826,12 +752,12 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
826 bytes = min_t(int, bytes, sizeof(tmp)); 752 bytes = min_t(int, bytes, sizeof(tmp));
827 753
828 DEBUG_ENT("going to reseed %s with %d bits " 754 DEBUG_ENT("going to reseed %s with %d bits "
829 "(%zu of %d requested)\n", 755 "(%d of %d requested)\n",
830 r->name, bytes * 8, nbytes * 8, r->entropy_count); 756 r->name, bytes * 8, nbytes * 8, r->entropy_count);
831 757
832 bytes = extract_entropy(r->pull, tmp, bytes, 758 bytes = extract_entropy(r->pull, tmp, bytes,
833 random_read_wakeup_thresh / 8, rsvd); 759 random_read_wakeup_thresh / 8, rsvd);
834 mix_pool_bytes(r, tmp, bytes, NULL); 760 mix_pool_bytes(r, tmp, bytes);
835 credit_entropy_bits(r, bytes*8); 761 credit_entropy_bits(r, bytes*8);
836 } 762 }
837} 763}
@@ -857,7 +783,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
857 spin_lock_irqsave(&r->lock, flags); 783 spin_lock_irqsave(&r->lock, flags);
858 784
859 BUG_ON(r->entropy_count > r->poolinfo->POOLBITS); 785 BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
860 DEBUG_ENT("trying to extract %zu bits from %s\n", 786 DEBUG_ENT("trying to extract %d bits from %s\n",
861 nbytes * 8, r->name); 787 nbytes * 8, r->name);
862 788
863 /* Can we pull enough? */ 789 /* Can we pull enough? */
@@ -879,7 +805,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
879 } 805 }
880 } 806 }
881 807
882 DEBUG_ENT("debiting %zu entropy credits from %s%s\n", 808 DEBUG_ENT("debiting %d entropy credits from %s%s\n",
883 nbytes * 8, r->name, r->limit ? "" : " (unlimited)"); 809 nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
884 810
885 spin_unlock_irqrestore(&r->lock, flags); 811 spin_unlock_irqrestore(&r->lock, flags);
@@ -890,19 +816,13 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
890static void extract_buf(struct entropy_store *r, __u8 *out) 816static void extract_buf(struct entropy_store *r, __u8 *out)
891{ 817{
892 int i; 818 int i;
893 union { 819 __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
894 __u32 w[5];
895 unsigned long l[LONGS(EXTRACT_SIZE)];
896 } hash;
897 __u32 workspace[SHA_WORKSPACE_WORDS];
898 __u8 extract[64]; 820 __u8 extract[64];
899 unsigned long flags;
900 821
901 /* Generate a hash across the pool, 16 words (512 bits) at a time */ 822 /* Generate a hash across the pool, 16 words (512 bits) at a time */
902 sha_init(hash.w); 823 sha_init(hash);
903 spin_lock_irqsave(&r->lock, flags);
904 for (i = 0; i < r->poolinfo->poolwords; i += 16) 824 for (i = 0; i < r->poolinfo->poolwords; i += 16)
905 sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); 825 sha_transform(hash, (__u8 *)(r->pool + i), workspace);
906 826
907 /* 827 /*
908 * We mix the hash back into the pool to prevent backtracking 828 * We mix the hash back into the pool to prevent backtracking
@@ -913,14 +833,13 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
913 * brute-forcing the feedback as hard as brute-forcing the 833 * brute-forcing the feedback as hard as brute-forcing the
914 * hash. 834 * hash.
915 */ 835 */
916 __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract); 836 mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
917 spin_unlock_irqrestore(&r->lock, flags);
918 837
919 /* 838 /*
920 * To avoid duplicates, we atomically extract a portion of the 839 * To avoid duplicates, we atomically extract a portion of the
921 * pool while mixing, and hash one final time. 840 * pool while mixing, and hash one final time.
922 */ 841 */
923 sha_transform(hash.w, extract, workspace); 842 sha_transform(hash, extract, workspace);
924 memset(extract, 0, sizeof(extract)); 843 memset(extract, 0, sizeof(extract));
925 memset(workspace, 0, sizeof(workspace)); 844 memset(workspace, 0, sizeof(workspace));
926 845
@@ -929,36 +848,20 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
929 * pattern, we fold it in half. Thus, we always feed back 848 * pattern, we fold it in half. Thus, we always feed back
930 * twice as much data as we output. 849 * twice as much data as we output.
931 */ 850 */
932 hash.w[0] ^= hash.w[3]; 851 hash[0] ^= hash[3];
933 hash.w[1] ^= hash.w[4]; 852 hash[1] ^= hash[4];
934 hash.w[2] ^= rol32(hash.w[2], 16); 853 hash[2] ^= rol32(hash[2], 16);
935 854 memcpy(out, hash, EXTRACT_SIZE);
936 /* 855 memset(hash, 0, sizeof(hash));
937 * If we have a architectural hardware random number
938 * generator, mix that in, too.
939 */
940 for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
941 unsigned long v;
942 if (!arch_get_random_long(&v))
943 break;
944 hash.l[i] ^= v;
945 }
946
947 memcpy(out, &hash, EXTRACT_SIZE);
948 memset(&hash, 0, sizeof(hash));
949} 856}
950 857
951static ssize_t extract_entropy(struct entropy_store *r, void *buf, 858static ssize_t extract_entropy(struct entropy_store *r, void *buf,
952 size_t nbytes, int min, int reserved) 859 size_t nbytes, int min, int reserved)
953{ 860{
954 ssize_t ret = 0, i; 861 ssize_t ret = 0, i;
955 __u8 tmp[EXTRACT_SIZE]; 862 __u8 tmp[EXTRACT_SIZE];
863 unsigned long flags;
956 864
957 /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
958 if (fips_enabled && !r->last_data_init)
959 nbytes += EXTRACT_SIZE;
960
961 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
962 xfer_secondary_pool(r, nbytes); 865 xfer_secondary_pool(r, nbytes);
963 nbytes = account(r, nbytes, min, reserved); 866 nbytes = account(r, nbytes, min, reserved);
964 867
@@ -966,19 +869,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
966 extract_buf(r, tmp); 869 extract_buf(r, tmp);
967 870
968 if (fips_enabled) { 871 if (fips_enabled) {
969 unsigned long flags;
970
971
972 /* prime last_data value if need be, per fips 140-2 */
973 if (!r->last_data_init) {
974 spin_lock_irqsave(&r->lock, flags);
975 memcpy(r->last_data, tmp, EXTRACT_SIZE);
976 r->last_data_init = true;
977 nbytes -= EXTRACT_SIZE;
978 spin_unlock_irqrestore(&r->lock, flags);
979 extract_buf(r, tmp);
980 }
981
982 spin_lock_irqsave(&r->lock, flags); 872 spin_lock_irqsave(&r->lock, flags);
983 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) 873 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
984 panic("Hardware RNG duplicated output!\n"); 874 panic("Hardware RNG duplicated output!\n");
@@ -1004,7 +894,6 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1004 ssize_t ret = 0, i; 894 ssize_t ret = 0, i;
1005 __u8 tmp[EXTRACT_SIZE]; 895 __u8 tmp[EXTRACT_SIZE];
1006 896
1007 trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
1008 xfer_secondary_pool(r, nbytes); 897 xfer_secondary_pool(r, nbytes);
1009 nbytes = account(r, nbytes, 0, 0); 898 nbytes = account(r, nbytes, 0, 0);
1010 899
@@ -1038,9 +927,8 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1038 927
1039/* 928/*
1040 * This function is the exported kernel interface. It returns some 929 * This function is the exported kernel interface. It returns some
1041 * number of good random numbers, suitable for key generation, seeding 930 * number of good random numbers, suitable for seeding TCP sequence
1042 * TCP sequence numbers, etc. It does not use the hw random number 931 * numbers, etc.
1043 * generator, if available; use get_random_bytes_arch() for that.
1044 */ 932 */
1045void get_random_bytes(void *buf, int nbytes) 933void get_random_bytes(void *buf, int nbytes)
1046{ 934{
@@ -1049,39 +937,6 @@ void get_random_bytes(void *buf, int nbytes)
1049EXPORT_SYMBOL(get_random_bytes); 937EXPORT_SYMBOL(get_random_bytes);
1050 938
1051/* 939/*
1052 * This function will use the architecture-specific hardware random
1053 * number generator if it is available. The arch-specific hw RNG will
1054 * almost certainly be faster than what we can do in software, but it
1055 * is impossible to verify that it is implemented securely (as
1056 * opposed, to, say, the AES encryption of a sequence number using a
1057 * key known by the NSA). So it's useful if we need the speed, but
1058 * only if we're willing to trust the hardware manufacturer not to
1059 * have put in a back door.
1060 */
1061void get_random_bytes_arch(void *buf, int nbytes)
1062{
1063 char *p = buf;
1064
1065 trace_get_random_bytes(nbytes, _RET_IP_);
1066 while (nbytes) {
1067 unsigned long v;
1068 int chunk = min(nbytes, (int)sizeof(unsigned long));
1069
1070 if (!arch_get_random_long(&v))
1071 break;
1072
1073 memcpy(p, &v, chunk);
1074 p += chunk;
1075 nbytes -= chunk;
1076 }
1077
1078 if (nbytes)
1079 extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1080}
1081EXPORT_SYMBOL(get_random_bytes_arch);
1082
1083
1084/*
1085 * init_std_data - initialize pool with system data 940 * init_std_data - initialize pool with system data
1086 * 941 *
1087 * @r: pool to initialize 942 * @r: pool to initialize
@@ -1092,32 +947,18 @@ EXPORT_SYMBOL(get_random_bytes_arch);
1092 */ 947 */
1093static void init_std_data(struct entropy_store *r) 948static void init_std_data(struct entropy_store *r)
1094{ 949{
1095 int i; 950 ktime_t now;
1096 ktime_t now = ktime_get_real(); 951 unsigned long flags;
1097 unsigned long rv;
1098 952
953 spin_lock_irqsave(&r->lock, flags);
1099 r->entropy_count = 0; 954 r->entropy_count = 0;
1100 r->entropy_total = 0; 955 spin_unlock_irqrestore(&r->lock, flags);
1101 r->last_data_init = false; 956
1102 mix_pool_bytes(r, &now, sizeof(now), NULL); 957 now = ktime_get_real();
1103 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) { 958 mix_pool_bytes(r, &now, sizeof(now));
1104 if (!arch_get_random_long(&rv)) 959 mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
1105 break;
1106 mix_pool_bytes(r, &rv, sizeof(rv), NULL);
1107 }
1108 mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
1109} 960}
1110 961
1111/*
1112 * Note that setup_arch() may call add_device_randomness()
1113 * long before we get here. This allows seeding of the pools
1114 * with some platform dependent data very early in the boot
1115 * process. But it limits our options here. We must use
1116 * statically allocated structures that already have all
1117 * initializations complete at compile time. We should also
1118 * take care not to overwrite the precious per platform data
1119 * we were given.
1120 */
1121static int rand_initialize(void) 962static int rand_initialize(void)
1122{ 963{
1123 init_std_data(&input_pool); 964 init_std_data(&input_pool);
@@ -1127,6 +968,24 @@ static int rand_initialize(void)
1127} 968}
1128module_init(rand_initialize); 969module_init(rand_initialize);
1129 970
971void rand_initialize_irq(int irq)
972{
973 struct timer_rand_state *state;
974
975 state = get_timer_rand_state(irq);
976
977 if (state)
978 return;
979
980 /*
981 * If kzalloc returns null, we just won't use that entropy
982 * source.
983 */
984 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
985 if (state)
986 set_timer_rand_state(irq, state);
987}
988
1130#ifdef CONFIG_BLOCK 989#ifdef CONFIG_BLOCK
1131void rand_initialize_disk(struct gendisk *disk) 990void rand_initialize_disk(struct gendisk *disk)
1132{ 991{
@@ -1155,16 +1014,11 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1155 if (n > SEC_XFER_SIZE) 1014 if (n > SEC_XFER_SIZE)
1156 n = SEC_XFER_SIZE; 1015 n = SEC_XFER_SIZE;
1157 1016
1158 DEBUG_ENT("reading %zu bits\n", n*8); 1017 DEBUG_ENT("reading %d bits\n", n*8);
1159 1018
1160 n = extract_entropy_user(&blocking_pool, buf, n); 1019 n = extract_entropy_user(&blocking_pool, buf, n);
1161 1020
1162 if (n < 0) { 1021 DEBUG_ENT("read got %d bits (%d still needed)\n",
1163 retval = n;
1164 break;
1165 }
1166
1167 DEBUG_ENT("read got %zd bits (%zd still needed)\n",
1168 n*8, (nbytes-n)*8); 1022 n*8, (nbytes-n)*8);
1169 1023
1170 if (n == 0) { 1024 if (n == 0) {
@@ -1189,6 +1043,10 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1189 continue; 1043 continue;
1190 } 1044 }
1191 1045
1046 if (n < 0) {
1047 retval = n;
1048 break;
1049 }
1192 count += n; 1050 count += n;
1193 buf += n; 1051 buf += n;
1194 nbytes -= n; 1052 nbytes -= n;
@@ -1235,7 +1093,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1235 count -= bytes; 1093 count -= bytes;
1236 p += bytes; 1094 p += bytes;
1237 1095
1238 mix_pool_bytes(r, buf, bytes, NULL); 1096 mix_pool_bytes(r, buf, bytes);
1239 cond_resched(); 1097 cond_resched();
1240 } 1098 }
1241 1099
@@ -1378,15 +1236,10 @@ static int proc_do_uuid(ctl_table *table, int write,
1378 uuid = table->data; 1236 uuid = table->data;
1379 if (!uuid) { 1237 if (!uuid) {
1380 uuid = tmp_uuid; 1238 uuid = tmp_uuid;
1381 generate_random_uuid(uuid); 1239 uuid[8] = 0;
1382 } else {
1383 static DEFINE_SPINLOCK(bootid_spinlock);
1384
1385 spin_lock(&bootid_spinlock);
1386 if (!uuid[8])
1387 generate_random_uuid(uuid);
1388 spin_unlock(&bootid_spinlock);
1389 } 1240 }
1241 if (uuid[8] == 0)
1242 generate_random_uuid(uuid);
1390 1243
1391 sprintf(buf, "%pU", uuid); 1244 sprintf(buf, "%pU", uuid);
1392 1245
@@ -1397,7 +1250,6 @@ static int proc_do_uuid(ctl_table *table, int write,
1397} 1250}
1398 1251
1399static int sysctl_poolsize = INPUT_POOL_WORDS * 32; 1252static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
1400extern ctl_table random_table[];
1401ctl_table random_table[] = { 1253ctl_table random_table[] = {
1402 { 1254 {
1403 .procname = "poolsize", 1255 .procname = "poolsize",
@@ -1463,17 +1315,12 @@ late_initcall(random_int_secret_init);
1463 * value is not cryptographically secure but for several uses the cost of 1315 * value is not cryptographically secure but for several uses the cost of
1464 * depleting entropy is too high 1316 * depleting entropy is too high
1465 */ 1317 */
1466static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); 1318DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
1467unsigned int get_random_int(void) 1319unsigned int get_random_int(void)
1468{ 1320{
1469 __u32 *hash; 1321 __u32 *hash = get_cpu_var(get_random_int_hash);
1470 unsigned int ret; 1322 unsigned int ret;
1471 1323
1472 if (arch_get_random_int(&ret))
1473 return ret;
1474
1475 hash = get_cpu_var(get_random_int_hash);
1476
1477 hash[0] += current->pid + jiffies + get_cycles(); 1324 hash[0] += current->pid + jiffies + get_cycles();
1478 md5_transform(hash, random_int_secret); 1325 md5_transform(hash, random_int_secret);
1479 ret = hash[0]; 1326 ret = hash[0];
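
Both mixers in this hunk, _mix_pool_bytes() over the large input pool and the per-CPU fast_mix() used on the interrupt path, follow the same byte-at-a-time recipe: rotate the incoming byte, XOR it with selected pool words (the CRC-style taps in the big pool, two neighbouring words in fast_mix), then fold the low three bits back in through a small table of CRC-32-style constants. A self-contained illustration of the fast_mix-style variant over a four-word pool; it is a readable paraphrase for experimenting with the mixing step, not the kernel function itself:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The twist table exactly as quoted in the hunk above. */
static const uint32_t twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
};

static uint32_t rol32(uint32_t w, unsigned int r)
{
        r &= 31;
        return r ? (w << r) | (w >> (32 - r)) : w;
}

struct tiny_pool {
        uint32_t pool[4];       /* 128-bit pool, like struct fast_pool */
        unsigned int count;
        unsigned int rotate;
};

/* Mix nbytes of input into the pool, one byte at a time. */
static void tiny_mix(struct tiny_pool *f, const void *in, size_t nbytes)
{
        const unsigned char *bytes = in;
        unsigned int i = f->count;
        unsigned int input_rotate = f->rotate;
        uint32_t w;

        while (nbytes--) {
                /* rotate the byte, xor it with two pool words... */
                w = rol32(*bytes++, input_rotate) ^ f->pool[i & 3] ^
                    f->pool[(i + 1) & 3];
                /* ...then twist the low three bits back into the pool word */
                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                input_rotate += (i++ & 3) ? 7 : 14;
        }
        f->count = i;
        f->rotate = input_rotate;
}

int main(void)
{
        struct tiny_pool p;
        const char sample[] = "irq timestamps or other noise";

        memset(&p, 0, sizeof(p));
        tiny_mix(&p, sample, sizeof(sample) - 1);
        printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
               p.pool[0], p.pool[1], p.pool[2], p.pool[3]);
        return 0;
}

The alternating rotation step (7 or 14) is there so that successive input bytes land on different bit positions of the pool words rather than piling up in the low bits.
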
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d0981..b33e8ea314e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -308,7 +308,7 @@ static const struct file_operations raw_ctl_fops = {
308 308
309static struct cdev raw_cdev; 309static struct cdev raw_cdev;
310 310
311static char *raw_devnode(struct device *dev, umode_t *mode) 311static char *raw_devnode(struct device *dev, mode_t *mode)
312{ 312{
313 return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev)); 313 return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
314} 314}
@@ -324,12 +324,13 @@ static int __init raw_init(void)
324 max_raw_minors = MAX_RAW_MINORS; 324 max_raw_minors = MAX_RAW_MINORS;
325 } 325 }
326 326
327 raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors); 327 raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors);
328 if (!raw_devices) { 328 if (!raw_devices) {
329 printk(KERN_ERR "Not enough memory for raw device structures\n"); 329 printk(KERN_ERR "Not enough memory for raw device structures\n");
330 ret = -ENOMEM; 330 ret = -ENOMEM;
331 goto error; 331 goto error;
332 } 332 }
333 memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors);
333 334
334 ret = register_chrdev_region(dev, max_raw_minors, "raw"); 335 ret = register_chrdev_region(dev, max_raw_minors, "raw");
335 if (ret) 336 if (ret)
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 91470fdbab2..dfa8b3062fd 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -57,8 +57,8 @@
57 * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with 57 * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
58 * interrupts disabled. Due to the index-port/data-port (0x70/0x71) 58 * interrupts disabled. Due to the index-port/data-port (0x70/0x71)
59 * design of the RTC, we don't want two different things trying to 59 * design of the RTC, we don't want two different things trying to
60 * get to it at once. (e.g. the periodic 11 min sync from 60 * get to it at once. (e.g. the periodic 11 min sync from time.c vs.
61 * kernel/time/ntp.c vs. this driver.) 61 * this driver.)
62 */ 62 */
63 63
64#include <linux/interrupt.h> 64#include <linux/interrupt.h>
@@ -80,9 +80,9 @@
80#include <linux/bcd.h> 80#include <linux/bcd.h>
81#include <linux/delay.h> 81#include <linux/delay.h>
82#include <linux/uaccess.h> 82#include <linux/uaccess.h>
83#include <linux/ratelimit.h>
84 83
85#include <asm/current.h> 84#include <asm/current.h>
85#include <asm/system.h>
86 86
87#ifdef CONFIG_X86 87#ifdef CONFIG_X86
88#include <asm/hpet.h> 88#include <asm/hpet.h>
@@ -411,7 +411,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
411 case RTC_IRQP_READ: 411 case RTC_IRQP_READ:
412 case RTC_IRQP_SET: 412 case RTC_IRQP_SET:
413 return -EINVAL; 413 return -EINVAL;
414 } 414 };
415 } 415 }
416#endif 416#endif
417 417
@@ -1195,8 +1195,10 @@ static void rtc_dropped_irq(unsigned long data)
1195 1195
1196 spin_unlock_irq(&rtc_lock); 1196 spin_unlock_irq(&rtc_lock);
1197 1197
1198 printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", 1198 if (printk_ratelimit()) {
1199 freq); 1199 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
1200 freq);
1201 }
1200 1202
1201 /* Now we have new data */ 1203 /* Now we have new data */
1202 wake_up_interruptible(&rtc_wait); 1204 wake_up_interruptible(&rtc_wait);
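
The rtc_dropped_irq() hunk only changes how the "lost some interrupts" warning is rate limited; both forms suppress the message once it starts repeating too quickly. A rough user-space analogue of the idea, using a plain interval check rather than the kernel's ratelimit machinery (the helper name and the five-second interval are made up for the example):

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

/* Print at most one message per 'interval' seconds; silently drop the rest. */
static void log_ratelimited(int interval, const char *fmt, ...)
{
        static time_t last;
        time_t now = time(NULL);
        va_list ap;

        if (last && now - last < interval)
                return;         /* too soon after the previous message */
        last = now;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                log_ratelimited(5, "rtc: lost some interrupts at %dHz.\n", 64);
        return 0;       /* only the first message is printed */
}
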
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d780295a147..1ee8ce7d276 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -54,6 +54,7 @@
54 54
55#include <asm/uaccess.h> 55#include <asm/uaccess.h>
56#include <asm/io.h> 56#include <asm/io.h>
57#include <asm/system.h>
57 58
58#include <linux/sonypi.h> 59#include <linux/sonypi.h>
59 60
@@ -1164,7 +1165,7 @@ static struct acpi_driver sonypi_acpi_driver = {
1164}; 1165};
1165#endif 1166#endif
1166 1167
1167static int sonypi_create_input_devices(struct platform_device *pdev) 1168static int __devinit sonypi_create_input_devices(struct platform_device *pdev)
1168{ 1169{
1169 struct input_dev *jog_dev; 1170 struct input_dev *jog_dev;
1170 struct input_dev *key_dev; 1171 struct input_dev *key_dev;
@@ -1225,7 +1226,7 @@ static int sonypi_create_input_devices(struct platform_device *pdev)
1225 return error; 1226 return error;
1226} 1227}
1227 1228
1228static int sonypi_setup_ioports(struct sonypi_device *dev, 1229static int __devinit sonypi_setup_ioports(struct sonypi_device *dev,
1229 const struct sonypi_ioport_list *ioport_list) 1230 const struct sonypi_ioport_list *ioport_list)
1230{ 1231{
1231 /* try to detect if sony-laptop is being used and thus 1232 /* try to detect if sony-laptop is being used and thus
@@ -1265,7 +1266,7 @@ static int sonypi_setup_ioports(struct sonypi_device *dev,
1265 return -EBUSY; 1266 return -EBUSY;
1266} 1267}
1267 1268
1268static int sonypi_setup_irq(struct sonypi_device *dev, 1269static int __devinit sonypi_setup_irq(struct sonypi_device *dev,
1269 const struct sonypi_irq_list *irq_list) 1270 const struct sonypi_irq_list *irq_list)
1270{ 1271{
1271 while (irq_list->irq) { 1272 while (irq_list->irq) {
@@ -1282,7 +1283,7 @@ static int sonypi_setup_irq(struct sonypi_device *dev,
1282 return -EBUSY; 1283 return -EBUSY;
1283} 1284}
1284 1285
1285static void sonypi_display_info(void) 1286static void __devinit sonypi_display_info(void)
1286{ 1287{
1287 printk(KERN_INFO "sonypi: detected type%d model, " 1288 printk(KERN_INFO "sonypi: detected type%d model, "
1288 "verbose = %d, fnkeyinit = %s, camera = %s, " 1289 "verbose = %d, fnkeyinit = %s, camera = %s, "
@@ -1304,7 +1305,7 @@ static void sonypi_display_info(void)
1304 sonypi_misc_device.minor); 1305 sonypi_misc_device.minor);
1305} 1306}
1306 1307
1307static int sonypi_probe(struct platform_device *dev) 1308static int __devinit sonypi_probe(struct platform_device *dev)
1308{ 1309{
1309 const struct sonypi_ioport_list *ioport_list; 1310 const struct sonypi_ioport_list *ioport_list;
1310 const struct sonypi_irq_list *irq_list; 1311 const struct sonypi_irq_list *irq_list;
@@ -1428,12 +1429,12 @@ static int sonypi_probe(struct platform_device *dev)
1428 return error; 1429 return error;
1429} 1430}
1430 1431
1431static int sonypi_remove(struct platform_device *dev) 1432static int __devexit sonypi_remove(struct platform_device *dev)
1432{ 1433{
1433 sonypi_disable(); 1434 sonypi_disable();
1434 1435
1435 synchronize_irq(sonypi_device.irq); 1436 synchronize_irq(sonypi_device.irq);
1436 flush_work(&sonypi_device.input_work); 1437 flush_work_sync(&sonypi_device.input_work);
1437 1438
1438 if (useinput) { 1439 if (useinput) {
1439 input_unregister_device(sonypi_device.input_key_dev); 1440 input_unregister_device(sonypi_device.input_key_dev);
@@ -1456,10 +1457,10 @@ static int sonypi_remove(struct platform_device *dev)
1456 return 0; 1457 return 0;
1457} 1458}
1458 1459
1459#ifdef CONFIG_PM_SLEEP 1460#ifdef CONFIG_PM
1460static int old_camera_power; 1461static int old_camera_power;
1461 1462
1462static int sonypi_suspend(struct device *dev) 1463static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
1463{ 1464{
1464 old_camera_power = sonypi_device.camera_power; 1465 old_camera_power = sonypi_device.camera_power;
1465 sonypi_disable(); 1466 sonypi_disable();
@@ -1467,16 +1468,14 @@ static int sonypi_suspend(struct device *dev)
1467 return 0; 1468 return 0;
1468} 1469}
1469 1470
1470static int sonypi_resume(struct device *dev) 1471static int sonypi_resume(struct platform_device *dev)
1471{ 1472{
1472 sonypi_enable(old_camera_power); 1473 sonypi_enable(old_camera_power);
1473 return 0; 1474 return 0;
1474} 1475}
1475
1476static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
1477#define SONYPI_PM (&sonypi_pm)
1478#else 1476#else
1479#define SONYPI_PM NULL 1477#define sonypi_suspend NULL
1478#define sonypi_resume NULL
1480#endif 1479#endif
1481 1480
1482static void sonypi_shutdown(struct platform_device *dev) 1481static void sonypi_shutdown(struct platform_device *dev)
@@ -1488,11 +1487,12 @@ static struct platform_driver sonypi_driver = {
1488 .driver = { 1487 .driver = {
1489 .name = "sonypi", 1488 .name = "sonypi",
1490 .owner = THIS_MODULE, 1489 .owner = THIS_MODULE,
1491 .pm = SONYPI_PM,
1492 }, 1490 },
1493 .probe = sonypi_probe, 1491 .probe = sonypi_probe,
1494 .remove = sonypi_remove, 1492 .remove = __devexit_p(sonypi_remove),
1495 .shutdown = sonypi_shutdown, 1493 .shutdown = sonypi_shutdown,
1494 .suspend = sonypi_suspend,
1495 .resume = sonypi_resume,
1496}; 1496};
1497 1497
1498static struct platform_device *sonypi_platform_device; 1498static struct platform_device *sonypi_platform_device;
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 34c63f85104..ad264185eb1 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -284,7 +284,7 @@ static void tb0219_pci_irq_init(void)
284 vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW); 284 vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW);
285} 285}
286 286
287static int tb0219_probe(struct platform_device *dev) 287static int __devinit tb0219_probe(struct platform_device *dev)
288{ 288{
289 int retval; 289 int retval;
290 290
@@ -318,7 +318,7 @@ static int tb0219_probe(struct platform_device *dev)
318 return 0; 318 return 0;
319} 319}
320 320
321static int tb0219_remove(struct platform_device *dev) 321static int __devexit tb0219_remove(struct platform_device *dev)
322{ 322{
323 _machine_restart = old_machine_restart; 323 _machine_restart = old_machine_restart;
324 324
@@ -334,7 +334,7 @@ static struct platform_device *tb0219_platform_device;
334 334
335static struct platform_driver tb0219_device_driver = { 335static struct platform_driver tb0219_device_driver = {
336 .probe = tb0219_probe, 336 .probe = tb0219_probe,
337 .remove = tb0219_remove, 337 .remove = __devexit_p(tb0219_remove),
338 .driver = { 338 .driver = {
339 .name = "TB0219", 339 .name = "TB0219",
340 .owner = THIS_MODULE, 340 .owner = THIS_MODULE,
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 3b22a606f79..cf3ee008dca 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -194,17 +194,17 @@ static ssize_t srom_read(struct file *filp, char __user *buf,
194 194
195 hv_retval = _srom_read(srom->hv_devhdl, kernbuf, 195 hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
196 *f_pos, bytes_this_pass); 196 *f_pos, bytes_this_pass);
197 if (hv_retval <= 0) { 197 if (hv_retval > 0) {
198 if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
199 retval = -EFAULT;
200 break;
201 }
202 } else if (hv_retval <= 0) {
198 if (retval == 0) 203 if (retval == 0)
199 retval = hv_retval; 204 retval = hv_retval;
200 break; 205 break;
201 } 206 }
202 207
203 if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
204 retval = -EFAULT;
205 break;
206 }
207
208 retval += hv_retval; 208 retval += hv_retval;
209 *f_pos += hv_retval; 209 *f_pos += hv_retval;
210 buf += hv_retval; 210 buf += hv_retval;
@@ -329,7 +329,7 @@ static struct device_attribute srom_dev_attrs[] = {
329 __ATTR_NULL 329 __ATTR_NULL
330}; 330};
331 331
332static char *srom_devnode(struct device *dev, umode_t *mode) 332static char *srom_devnode(struct device *dev, mode_t *mode)
333{ 333{
334 *mode = S_IRUGO | S_IWUSR; 334 *mode = S_IRUGO | S_IWUSR;
335 return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); 335 return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
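
The srom_read() rework is about ordering: the backend's return value is checked before it is ever used as a copy length, and data is handed to the caller only on a positive result. A stand-alone sketch of that loop shape; backend_read() is a made-up stand-in for the hypervisor call, and plain memcpy() replaces copy_to_user():

#include <stdio.h>
#include <string.h>

/* Pretend backend: copies from a fixed message, returns bytes read or 0 at EOF. */
static int backend_read(char *buf, size_t off, size_t len)
{
        static const char src[] = "serial rom contents";

        if (off >= sizeof(src) - 1)
                return 0;
        if (len > sizeof(src) - 1 - off)
                len = sizeof(src) - 1 - off;
        memcpy(buf, src + off, len);
        return (int)len;
}

static long srom_like_read(char *user_buf, size_t count)
{
        char kernbuf[8];        /* small bounce buffer, like the driver's */
        size_t pos = 0;
        long total = 0;

        while (count) {
                size_t chunk = count < sizeof(kernbuf) ? count : sizeof(kernbuf);
                int got = backend_read(kernbuf, pos, chunk);

                if (got <= 0) {         /* error or EOF: decide before copying */
                        if (total == 0)
                                total = got;
                        break;
                }
                memcpy(user_buf + total, kernbuf, (size_t)got); /* copy out only on success */
                total += got;
                pos += got;
                count -= got;
        }
        return total;
}

int main(void)
{
        char buf[64] = "";
        long n = srom_like_read(buf, sizeof(buf) - 1);

        printf("read %ld bytes: %s\n", n, buf);
        return 0;
}
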
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index e95e0ab0bd8..0c964cdcc22 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -784,10 +784,8 @@ static int __init tlclk_init(void)
784 } 784 }
785 tlclk_major = ret; 785 tlclk_major = ret;
786 alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); 786 alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
787 if (!alarm_events) { 787 if (!alarm_events)
788 ret = -ENOMEM;
789 goto out1; 788 goto out1;
790 }
791 789
792 /* Read telecom clock IRQ number (Set by BIOS) */ 790 /* Read telecom clock IRQ number (Set by BIOS) */
793 if (!request_region(TLCLK_BASE, 8, "telco_clock")) { 791 if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -799,7 +797,7 @@ static int __init tlclk_init(void)
799 telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); 797 telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
800 798
801 if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */ 799 if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
802 printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", 800 printk(KERN_ERR "telclk_interrup = 0x%x non-mcpbl0010 hw.\n",
803 telclk_interrupt); 801 telclk_interrupt);
804 ret = -ENXIO; 802 ret = -ENXIO;
805 goto out3; 803 goto out3;
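
The tlclk_init() hunk is a reminder that a bare goto after a failed kzalloc() leaves ret holding the non-negative value from the preceding register_chrdev() step, so the out-of-memory case is not reported as -ENOMEM; the other variant assigns the error code before jumping. A minimal illustration of the pitfall, with the allocation failure simulated and the numbers invented:

#include <stdio.h>
#include <stdlib.h>

#define ERR_NOMEM (-12)         /* stand-in for -ENOMEM */

static int init_device(int simulate_alloc_failure)
{
        void *events;
        int ret;

        ret = 254;              /* earlier step "succeeded", e.g. a char-dev major */

        events = simulate_alloc_failure ? NULL : malloc(128);
        if (!events) {
                ret = ERR_NOMEM; /* without this line we would return 254 */
                goto out;
        }

        printf("initialised, major=%d\n", ret);
        free(events);
        return 0;

out:
        return ret;
}

int main(void)
{
        printf("failure path returns %d\n", init_device(1));
        return 0;
}
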
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 915875e431d..fa567f1158c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -5,6 +5,7 @@
5menuconfig TCG_TPM 5menuconfig TCG_TPM
6 tristate "TPM Hardware Support" 6 tristate "TPM Hardware Support"
7 depends on HAS_IOMEM 7 depends on HAS_IOMEM
8 depends on EXPERIMENTAL
8 select SECURITYFS 9 select SECURITYFS
9 ---help--- 10 ---help---
10 If you have a TPM security chip in your system, which 11 If you have a TPM security chip in your system, which
@@ -26,27 +27,14 @@ if TCG_TPM
26 27
27config TCG_TIS 28config TCG_TIS
28 tristate "TPM Interface Specification 1.2 Interface" 29 tristate "TPM Interface Specification 1.2 Interface"
29 depends on X86
30 ---help--- 30 ---help---
31 If you have a TPM security chip that is compliant with the 31 If you have a TPM security chip that is compliant with the
32 TCG TIS 1.2 TPM specification say Yes and it will be accessible 32 TCG TIS 1.2 TPM specification say Yes and it will be accessible
33 from within Linux. To compile this driver as a module, choose 33 from within Linux. To compile this driver as a module, choose
34 M here; the module will be called tpm_tis. 34 M here; the module will be called tpm_tis.
35 35
36config TCG_TIS_I2C_INFINEON
37 tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
38 depends on I2C
39 ---help---
40 If you have a TPM security chip that is compliant with the
41 TCG TIS 1.2 TPM specification and Infineon's I2C Protocol Stack
42 Specification 0.20 say Yes and it will be accessible from within
43 Linux.
44 To compile this driver as a module, choose M here; the module
45 will be called tpm_tis_i2c_infineon.
46
47config TCG_NSC 36config TCG_NSC
48 tristate "National Semiconductor TPM Interface" 37 tristate "National Semiconductor TPM Interface"
49 depends on X86
50 ---help--- 38 ---help---
51 If you have a TPM security chip from National Semiconductor 39 If you have a TPM security chip from National Semiconductor
52 say Yes and it will be accessible from within Linux. To 40 say Yes and it will be accessible from within Linux. To
@@ -73,12 +61,4 @@ config TCG_INFINEON
73 Further information on this driver and the supported hardware 61 Further information on this driver and the supported hardware
74 can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 62 can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
75 63
76config TCG_IBMVTPM
77 tristate "IBM VTPM Interface"
78 depends on PPC64
79 ---help---
80 If you have IBM virtual TPM (VTPM) support say Yes and it
81 will be accessible from within Linux. To compile this driver
82 as a module, choose M here; the module will be called tpm_ibmvtpm.
83
84endif # TCG_TPM 64endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 5b3fc8bc6c1..ea3a1e02a82 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -4,16 +4,8 @@
4obj-$(CONFIG_TCG_TPM) += tpm.o 4obj-$(CONFIG_TCG_TPM) += tpm.o
5ifdef CONFIG_ACPI 5ifdef CONFIG_ACPI
6 obj-$(CONFIG_TCG_TPM) += tpm_bios.o 6 obj-$(CONFIG_TCG_TPM) += tpm_bios.o
7 tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o
8else
9ifdef CONFIG_TCG_IBMVTPM
10 obj-$(CONFIG_TCG_TPM) += tpm_bios.o
11 tpm_bios-objs += tpm_eventlog.o tpm_of.o
12endif
13endif 7endif
14obj-$(CONFIG_TCG_TIS) += tpm_tis.o 8obj-$(CONFIG_TCG_TIS) += tpm_tis.o
15obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
16obj-$(CONFIG_TCG_NSC) += tpm_nsc.o 9obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
17obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o 10obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
18obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o 11obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
19obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 93211df52aa..9ca5c021d0b 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -27,10 +27,14 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/freezer.h>
31 30
32#include "tpm.h" 31#include "tpm.h"
33#include "tpm_eventlog.h" 32
33enum tpm_const {
34 TPM_MINOR = 224, /* officially assigned */
35 TPM_BUFSIZE = 4096,
36 TPM_NUM_DEVICES = 256,
37};
34 38
35enum tpm_duration { 39enum tpm_duration {
36 TPM_SHORT = 0, 40 TPM_SHORT = 0,
@@ -436,6 +440,7 @@ out:
436} 440}
437 441
438#define TPM_DIGEST_SIZE 20 442#define TPM_DIGEST_SIZE 20
443#define TPM_ERROR_SIZE 10
439#define TPM_RET_CODE_IDX 6 444#define TPM_RET_CODE_IDX 6
440 445
441enum tpm_capabilities { 446enum tpm_capabilities {
@@ -464,20 +469,17 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
464 len = tpm_transmit(chip,(u8 *) cmd, len); 469 len = tpm_transmit(chip,(u8 *) cmd, len);
465 if (len < 0) 470 if (len < 0)
466 return len; 471 return len;
467 else if (len < TPM_HEADER_SIZE) 472 if (len == TPM_ERROR_SIZE) {
468 return -EFAULT; 473 err = be32_to_cpu(cmd->header.out.return_code);
469 474 dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
470 err = be32_to_cpu(cmd->header.out.return_code); 475 return err;
471 if (err != 0) 476 }
472 dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc); 477 return 0;
473
474 return err;
475} 478}
476 479
477#define TPM_INTERNAL_RESULT_SIZE 200 480#define TPM_INTERNAL_RESULT_SIZE 200
478#define TPM_TAG_RQU_COMMAND cpu_to_be16(193) 481#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
479#define TPM_ORD_GET_CAP cpu_to_be32(101) 482#define TPM_ORD_GET_CAP cpu_to_be32(101)
480#define TPM_ORD_GET_RANDOM cpu_to_be32(70)
481 483
482static const struct tpm_input_header tpm_getcap_header = { 484static const struct tpm_input_header tpm_getcap_header = {
483 .tag = TPM_TAG_RQU_COMMAND, 485 .tag = TPM_TAG_RQU_COMMAND,
@@ -528,7 +530,7 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
528} 530}
529EXPORT_SYMBOL_GPL(tpm_gen_interrupt); 531EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
530 532
531int tpm_get_timeouts(struct tpm_chip *chip) 533void tpm_get_timeouts(struct tpm_chip *chip)
532{ 534{
533 struct tpm_cmd_t tpm_cmd; 535 struct tpm_cmd_t tpm_cmd;
534 struct timeout_t *timeout_cap; 536 struct timeout_t *timeout_cap;
@@ -550,7 +552,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
550 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || 552 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
551 be32_to_cpu(tpm_cmd.header.out.length) 553 be32_to_cpu(tpm_cmd.header.out.length)
552 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32)) 554 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
553 return -EINVAL; 555 return;
554 556
555 timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout; 557 timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
556 /* Don't overwrite default if value is 0 */ 558 /* Don't overwrite default if value is 0 */
@@ -581,12 +583,12 @@ duration:
581 rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 583 rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
582 "attempting to determine the durations"); 584 "attempting to determine the durations");
583 if (rc) 585 if (rc)
584 return rc; 586 return;
585 587
586 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || 588 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
587 be32_to_cpu(tpm_cmd.header.out.length) 589 be32_to_cpu(tpm_cmd.header.out.length)
588 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) 590 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
589 return -EINVAL; 591 return;
590 592
591 duration_cap = &tpm_cmd.params.getcap_out.cap.duration; 593 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
592 chip->vendor.duration[TPM_SHORT] = 594 chip->vendor.duration[TPM_SHORT] =
@@ -608,36 +610,20 @@ duration:
 		chip->vendor.duration_adjusted = true;
 		dev_info(chip->dev, "Adjusting TPM timeout parameters.");
 	}
-	return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_get_timeouts);
 
-#define TPM_ORD_CONTINUE_SELFTEST 83
-#define CONTINUE_SELFTEST_RESULT_SIZE 10
-
-static struct tpm_input_header continue_selftest_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
-	.length = cpu_to_be32(10),
-	.ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
-};
-
-/**
- * tpm_continue_selftest -- run TPM's selftest
- * @chip: TPM chip to use
- *
- * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
- * a TPM error code.
- */
-static int tpm_continue_selftest(struct tpm_chip *chip)
+void tpm_continue_selftest(struct tpm_chip *chip)
 {
-	int rc;
-	struct tpm_cmd_t cmd;
+	u8 data[] = {
+		0, 193,		/* TPM_TAG_RQU_COMMAND */
+		0, 0, 0, 10,	/* length */
+		0, 0, 0, 83,	/* TPM_ORD_ContinueSelfTest */
+	};
 
-	cmd.header.in = continue_selftest_header;
-	rc = transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
-			  "continue selftest");
-	return rc;
+	tpm_transmit(chip, data, sizeof(data));
 }
+EXPORT_SYMBOL_GPL(tpm_continue_selftest);
 
 ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
 		char *buf)
@@ -732,7 +718,7 @@ static struct tpm_input_header pcrread_header = {
 	.ordinal = TPM_ORDINAL_PCRREAD
 };
 
-static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
 {
 	int rc;
 	struct tpm_cmd_t cmd;
@@ -812,62 +798,6 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
 }
 EXPORT_SYMBOL_GPL(tpm_pcr_extend);
 
-/**
- * tpm_do_selftest - have the TPM continue its selftest and wait until it
- * can receive further commands
- * @chip: TPM chip to use
- *
- * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
- * a TPM error code.
- */
-int tpm_do_selftest(struct tpm_chip *chip)
-{
-	int rc;
-	unsigned int loops;
-	unsigned int delay_msec = 1000;
-	unsigned long duration;
-	struct tpm_cmd_t cmd;
-
-	duration = tpm_calc_ordinal_duration(chip,
-					     TPM_ORD_CONTINUE_SELFTEST);
-
-	loops = jiffies_to_msecs(duration) / delay_msec;
-
-	rc = tpm_continue_selftest(chip);
-	/* This may fail if there was no TPM driver during a suspend/resume
-	 * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
-	 */
-	if (rc)
-		return rc;
-
-	do {
-		/* Attempt to read a PCR value */
-		cmd.header.in = pcrread_header;
-		cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
-		rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
-
-		if (rc < TPM_HEADER_SIZE)
-			return -EFAULT;
-
-		rc = be32_to_cpu(cmd.header.out.return_code);
-		if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
-			dev_info(chip->dev,
-				 "TPM is disabled/deactivated (0x%X)\n", rc);
-			/* TPM is disabled and/or deactivated; driver can
-			 * proceed and TPM does handle commands for
-			 * suspend/resume correctly
-			 */
-			return 0;
-		}
-		if (rc != TPM_WARN_DOING_SELFTEST)
-			return rc;
-		msleep(delay_msec);
-	} while (--loops > 0);
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_do_selftest);
-
 int tpm_send(u32 chip_num, void *cmd, size_t buflen)
 {
 	struct tpm_chip *chip;
@@ -915,7 +845,7 @@ EXPORT_SYMBOL_GPL(tpm_show_pcrs);
 
 #define READ_PUBEK_RESULT_SIZE 314
 #define TPM_ORD_READPUBEK cpu_to_be32(124)
-static struct tpm_input_header tpm_readpubek_header = {
+struct tpm_input_header tpm_readpubek_header = {
 	.tag = TPM_TAG_RQU_COMMAND,
 	.length = cpu_to_be32(30),
 	.ordinal = TPM_ORD_READPUBEK
@@ -1036,9 +966,6 @@ ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 
-	if (chip->vendor.duration[TPM_LONG] == 0)
-		return 0;
-
 	return sprintf(buf, "%d %d %d [%s]\n",
 		       jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
 		       jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
@@ -1075,46 +1002,6 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
 }
 EXPORT_SYMBOL_GPL(tpm_store_cancel);
 
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
-		      wait_queue_head_t *queue)
-{
-	unsigned long stop;
-	long rc;
-	u8 status;
-
-	/* check current status */
-	status = chip->vendor.status(chip);
-	if ((status & mask) == mask)
-		return 0;
-
-	stop = jiffies + timeout;
-
-	if (chip->vendor.irq) {
-again:
-		timeout = stop - jiffies;
-		if ((long)timeout <= 0)
-			return -ETIME;
-		rc = wait_event_interruptible_timeout(*queue,
-			((chip->vendor.status(chip)
-			& mask) == mask),
-			timeout);
-		if (rc > 0)
-			return 0;
-		if (rc == -ERESTARTSYS && freezing(current)) {
-			clear_thread_flag(TIF_SIGPENDING);
-			goto again;
-		}
-	} else {
-		do {
-			msleep(TPM_TIMEOUT);
-			status = chip->vendor.status(chip);
-			if ((status & mask) == mask)
-				return 0;
-		} while (time_before(jiffies, stop));
-	}
-	return -ETIME;
-}
-EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
 /*
  * Device file system interface to the TPM
  *
@@ -1168,10 +1055,10 @@ int tpm_release(struct inode *inode, struct file *file)
 	struct tpm_chip *chip = file->private_data;
 
 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_work(&chip->work);
+	flush_work_sync(&chip->work);
 	file->private_data = NULL;
 	atomic_set(&chip->data_pending, 0);
-	kzfree(chip->data_buffer);
+	kfree(chip->data_buffer);
 	clear_bit(0, &chip->is_open);
 	put_device(chip->dev);
 	return 0;
@@ -1182,21 +1069,18 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
 		 size_t size, loff_t *off)
 {
 	struct tpm_chip *chip = file->private_data;
-	size_t in_size = size;
-	ssize_t out_size;
+	size_t in_size = size, out_size;
 
 	/* cannot perform a write until the read has cleared
-	   either via tpm_read or a user_read_timer timeout.
-	   This also prevents splitted buffered writes from blocking here.
-	*/
-	if (atomic_read(&chip->data_pending) != 0)
-		return -EBUSY;
-
-	if (in_size > TPM_BUFSIZE)
-		return -E2BIG;
+	   either via tpm_read or a user_read_timer timeout */
+	while (atomic_read(&chip->data_pending) != 0)
+		msleep(TPM_TIMEOUT);
 
 	mutex_lock(&chip->buffer_mutex);
 
+	if (in_size > TPM_BUFSIZE)
+		in_size = TPM_BUFSIZE;
+
 	if (copy_from_user
 	    (chip->data_buffer, (void __user *) buf, in_size)) {
 		mutex_unlock(&chip->buffer_mutex);
@@ -1205,10 +1089,6 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
 
 	/* atomic tpm command send and result receive */
 	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
-	if (out_size < 0) {
-		mutex_unlock(&chip->buffer_mutex);
-		return out_size;
-	}
 
 	atomic_set(&chip->data_pending, out_size);
 	mutex_unlock(&chip->buffer_mutex);
@@ -1228,24 +1108,22 @@ ssize_t tpm_read(struct file *file, char __user *buf,
 	int rc;
 
 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_work(&chip->work);
+	flush_work_sync(&chip->work);
 	ret_size = atomic_read(&chip->data_pending);
+	atomic_set(&chip->data_pending, 0);
 	if (ret_size > 0) {	/* relay data */
-		ssize_t orig_ret_size = ret_size;
 		if (size < ret_size)
 			ret_size = size;
 
 		mutex_lock(&chip->buffer_mutex);
 		rc = copy_to_user(buf, chip->data_buffer, ret_size);
-		memset(chip->data_buffer, 0, orig_ret_size);
+		memset(chip->data_buffer, 0, ret_size);
 		if (rc)
 			ret_size = -EFAULT;
 
 		mutex_unlock(&chip->buffer_mutex);
 	}
 
-	atomic_set(&chip->data_pending, 0);
-
 	return ret_size;
 }
 EXPORT_SYMBOL_GPL(tpm_read);
@@ -1266,7 +1144,6 @@ void tpm_remove_hardware(struct device *dev)
 
 	misc_deregister(&chip->vendor.miscdev);
 	sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
-	tpm_remove_ppi(&dev->kobj);
 	tpm_bios_log_teardown(chip->bios_dir);
 
 	/* write it this way to be explicit (chip->dev == dev) */
@@ -1287,7 +1164,7 @@ static struct tpm_input_header savestate_header = {
  * We are about to suspend. Save the TPM state
  * so that it can be restored.
  */
-int tpm_pm_suspend(struct device *dev)
+int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 	struct tpm_cmd_t cmd;
@@ -1331,65 +1208,10 @@ int tpm_pm_resume(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(tpm_pm_resume);
 
-#define TPM_GETRANDOM_RESULT_SIZE	18
-static struct tpm_input_header tpm_getrandom_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
-	.length = cpu_to_be32(14),
-	.ordinal = TPM_ORD_GET_RANDOM
-};
-
-/**
- * tpm_get_random() - Get random bytes from the tpm's RNG
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
- *
- * Returns < 0 on error and the number of bytes read on success
- */
-int tpm_get_random(u32 chip_num, u8 *out, size_t max)
-{
-	struct tpm_chip *chip;
-	struct tpm_cmd_t tpm_cmd;
-	u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
-	int err, total = 0, retries = 5;
-	u8 *dest = out;
-
-	chip = tpm_chip_find_get(chip_num);
-	if (chip == NULL)
-		return -ENODEV;
-
-	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
-		return -EINVAL;
-
-	do {
-		tpm_cmd.header.in = tpm_getrandom_header;
-		tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
-
-		err = transmit_cmd(chip, &tpm_cmd,
-				   TPM_GETRANDOM_RESULT_SIZE + num_bytes,
-				   "attempting get random");
-		if (err)
-			break;
-
-		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
-		memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
-
-		dest += recd;
-		total += recd;
-		num_bytes -= recd;
-	} while (retries-- && total < max);
-
-	return total ? total : -EIO;
-}
-EXPORT_SYMBOL_GPL(tpm_get_random);
-
 /* In case vendor provided release function, call it too.*/
 
 void tpm_dev_vendor_release(struct tpm_chip *chip)
 {
-	if (!chip)
-		return;
-
 	if (chip->vendor.release)
 		chip->vendor.release(chip->dev);
 
@@ -1403,13 +1225,10 @@ EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
  * Once all references to platform device are down to 0,
  * release all allocated structures.
  */
-static void tpm_dev_release(struct device *dev)
+void tpm_dev_release(struct device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 
-	if (!chip)
-		return;
-
 	tpm_dev_vendor_release(chip);
 
 	chip->release(dev);
@@ -1476,17 +1295,15 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
 			"unable to misc_register %s, minor %d\n",
 			chip->vendor.miscdev.name,
 			chip->vendor.miscdev.minor);
-		goto put_device;
+		put_device(chip->dev);
+		return NULL;
 	}
 
 	if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
 		misc_deregister(&chip->vendor.miscdev);
-		goto put_device;
-	}
+		put_device(chip->dev);
 
-	if (tpm_add_ppi(&dev->kobj)) {
-		misc_deregister(&chip->vendor.miscdev);
-		goto put_device;
+		return NULL;
 	}
 
 	chip->bios_dir = tpm_bios_log_setup(devname);
@@ -1498,8 +1315,6 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
 
 	return chip;
 
-put_device:
-	put_device(chip->dev);
 out_free:
 	kfree(chip);
 	kfree(devname);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8ef7649a50a..9c4163cfa3c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -28,12 +28,6 @@
 #include <linux/io.h>
 #include <linux/tpm.h>
 
-enum tpm_const {
-	TPM_MINOR = 224,	/* officially assigned */
-	TPM_BUFSIZE = 4096,
-	TPM_NUM_DEVICES = 256,
-};
-
 enum tpm_timeout {
 	TPM_TIMEOUT = 5,	/* msecs */
 };
@@ -44,11 +38,6 @@ enum tpm_addr {
 	TPM_ADDR = 0x4E,
 };
 
-#define TPM_WARN_DOING_SELFTEST 0x802
-#define TPM_ERR_DEACTIVATED 0x6
-#define TPM_ERR_DISABLED 0x7
-
-#define TPM_HEADER_SIZE 10
 extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
 				char *);
 extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
@@ -100,14 +89,11 @@ struct tpm_vendor_specific {
 	bool timeout_adjusted;
 	unsigned long duration[3]; /* jiffies */
 	bool duration_adjusted;
-	void *data;
 
 	wait_queue_head_t read_queue;
 	wait_queue_head_t int_queue;
 };
 
-#define TPM_VID_INTEL    0x8086
-
 struct tpm_chip {
 	struct device *dev;	/* Device stuff */
 
@@ -276,21 +262,6 @@ struct tpm_pcrextend_in {
 	u8	hash[TPM_DIGEST_SIZE];
 }__attribute__((packed));
 
-/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
- * bytes, but 128 is still a relatively large number of random bytes and
- * anything much bigger causes users of struct tpm_cmd_t to start getting
- * compiler warnings about stack frame size. */
-#define TPM_MAX_RNG_DATA	128
-
-struct tpm_getrandom_out {
-	__be32 rng_data_len;
-	u8 rng_data[TPM_MAX_RNG_DATA];
-}__attribute__((packed));
-
-struct tpm_getrandom_in {
-	__be32 num_bytes;
-}__attribute__((packed));
-
 typedef union {
 	struct	tpm_getcap_params_out getcap_out;
 	struct	tpm_readpubek_params_out readpubek_out;
@@ -299,8 +270,6 @@ typedef union {
 	struct	tpm_pcrread_in	pcrread_in;
 	struct	tpm_pcrread_out	pcrread_out;
 	struct	tpm_pcrextend_in pcrextend_in;
-	struct	tpm_getrandom_in getrandom_in;
-	struct	tpm_getrandom_out getrandom_out;
 } tpm_cmd_params;
 
 struct tpm_cmd_t {
@@ -310,9 +279,9 @@ struct tpm_cmd_t {
 
 ssize_t	tpm_getcap(struct device *, __be32, cap_t *, const char *);
 
-extern int tpm_get_timeouts(struct tpm_chip *);
+extern void tpm_get_timeouts(struct tpm_chip *);
 extern void tpm_gen_interrupt(struct tpm_chip *);
-extern int tpm_do_selftest(struct tpm_chip *);
+extern void tpm_continue_selftest(struct tpm_chip *);
 extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
 extern struct tpm_chip* tpm_register_hardware(struct device *,
 				const struct tpm_vendor_specific *);
@@ -323,21 +292,18 @@ extern ssize_t tpm_write(struct file *, const char __user *, size_t,
 		loff_t *);
 extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
 extern void tpm_remove_hardware(struct device *);
-extern int tpm_pm_suspend(struct device *);
+extern int tpm_pm_suspend(struct device *, pm_message_t);
 extern int tpm_pm_resume(struct device *);
-extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
-			     wait_queue_head_t *);
 
 #ifdef CONFIG_ACPI
-extern int tpm_add_ppi(struct kobject *);
-extern void tpm_remove_ppi(struct kobject *);
+extern struct dentry ** tpm_bios_log_setup(char *);
+extern void tpm_bios_log_teardown(struct dentry **);
 #else
-static inline int tpm_add_ppi(struct kobject *parent)
+static inline struct dentry ** tpm_bios_log_setup(char *name)
 {
-	return 0;
+	return NULL;
 }
-
-static inline void tpm_remove_ppi(struct kobject *parent)
+static inline void tpm_bios_log_teardown(struct dentry **dir)
 {
 }
 #endif
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
deleted file mode 100644
index 56051d0c97a..00000000000
--- a/drivers/char/tpm/tpm_acpi.c
+++ /dev/null
@@ -1,109 +0,0 @@
1/*
2 * Copyright (C) 2005 IBM Corporation
3 *
4 * Authors:
5 * Seiji Munetoh <munetoh@jp.ibm.com>
6 * Stefan Berger <stefanb@us.ibm.com>
7 * Reiner Sailer <sailer@watson.ibm.com>
8 * Kylene Hall <kjhall@us.ibm.com>
9 *
10 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
11 *
12 * Access to the eventlog extended by the TCG BIOS of PC platform
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/seq_file.h>
22#include <linux/fs.h>
23#include <linux/security.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26#include <acpi/acpi.h>
27
28#include "tpm.h"
29#include "tpm_eventlog.h"
30
31struct acpi_tcpa {
32 struct acpi_table_header hdr;
33 u16 platform_class;
34 union {
35 struct client_hdr {
36 u32 log_max_len __attribute__ ((packed));
37 u64 log_start_addr __attribute__ ((packed));
38 } client;
39 struct server_hdr {
40 u16 reserved;
41 u64 log_max_len __attribute__ ((packed));
42 u64 log_start_addr __attribute__ ((packed));
43 } server;
44 };
45};
46
47/* read binary bios log */
48int read_log(struct tpm_bios_log *log)
49{
50 struct acpi_tcpa *buff;
51 acpi_status status;
52 void __iomem *virt;
53 u64 len, start;
54
55 if (log->bios_event_log != NULL) {
56 printk(KERN_ERR
57 "%s: ERROR - Eventlog already initialized\n",
58 __func__);
59 return -EFAULT;
60 }
61
62 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
63 status = acpi_get_table(ACPI_SIG_TCPA, 1,
64 (struct acpi_table_header **)&buff);
65
66 if (ACPI_FAILURE(status)) {
67 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
68 __func__);
69 return -EIO;
70 }
71
72 switch(buff->platform_class) {
73 case BIOS_SERVER:
74 len = buff->server.log_max_len;
75 start = buff->server.log_start_addr;
76 break;
77 case BIOS_CLIENT:
78 default:
79 len = buff->client.log_max_len;
80 start = buff->client.log_start_addr;
81 break;
82 }
83 if (!len) {
84 printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
85 return -EIO;
86 }
87
88 /* malloc EventLog space */
89 log->bios_event_log = kmalloc(len, GFP_KERNEL);
90 if (!log->bios_event_log) {
91 printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
92 __func__);
93 return -ENOMEM;
94 }
95
96 log->bios_event_log_end = log->bios_event_log + len;
97
98 virt = acpi_os_map_memory(start, len);
99 if (!virt) {
100 kfree(log->bios_event_log);
101 printk("%s: ERROR - Unable to map memory\n", __func__);
102 return -EIO;
103 }
104
105 memcpy_fromio(log->bios_event_log, virt, len);
106
107 acpi_os_unmap_memory(virt, len);
108 return 0;
109}
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 678d57019dc..c64a1bc6534 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -168,14 +168,22 @@ static void atml_plat_remove(void)
 	}
 }
 
-static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
+static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg)
+{
+	return tpm_pm_suspend(&dev->dev, msg);
+}
 
+static int tpm_atml_resume(struct platform_device *dev)
+{
+	return tpm_pm_resume(&dev->dev);
+}
 static struct platform_driver atml_drv = {
 	.driver = {
 		.name = "tpm_atmel",
 		.owner = THIS_MODULE,
-		.pm = &tpm_atml_pm,
 	},
+	.suspend = tpm_atml_suspend,
+	.resume = tpm_atml_resume,
 };
 
 static int __init init_atmel(void)
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
deleted file mode 100644
index 84ddc557b8f..00000000000
--- a/drivers/char/tpm/tpm_eventlog.c
+++ /dev/null
@@ -1,419 +0,0 @@
1/*
2 * Copyright (C) 2005, 2012 IBM Corporation
3 *
4 * Authors:
5 * Kent Yoder <key@linux.vnet.ibm.com>
6 * Seiji Munetoh <munetoh@jp.ibm.com>
7 * Stefan Berger <stefanb@us.ibm.com>
8 * Reiner Sailer <sailer@watson.ibm.com>
9 * Kylene Hall <kjhall@us.ibm.com>
10 *
11 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
12 *
13 * Access to the eventlog created by a system's firmware / BIOS
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/seq_file.h>
23#include <linux/fs.h>
24#include <linux/security.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27
28#include "tpm.h"
29#include "tpm_eventlog.h"
30
31
32static const char* tcpa_event_type_strings[] = {
33 "PREBOOT",
34 "POST CODE",
35 "",
36 "NO ACTION",
37 "SEPARATOR",
38 "ACTION",
39 "EVENT TAG",
40 "S-CRTM Contents",
41 "S-CRTM Version",
42 "CPU Microcode",
43 "Platform Config Flags",
44 "Table of Devices",
45 "Compact Hash",
46 "IPL",
47 "IPL Partition Data",
48 "Non-Host Code",
49 "Non-Host Config",
50 "Non-Host Info"
51};
52
53static const char* tcpa_pc_event_id_strings[] = {
54 "",
55 "SMBIOS",
56 "BIS Certificate",
57 "POST BIOS ",
58 "ESCD ",
59 "CMOS",
60 "NVRAM",
61 "Option ROM",
62 "Option ROM config",
63 "",
64 "Option ROM microcode ",
65 "S-CRTM Version",
66 "S-CRTM Contents ",
67 "POST Contents ",
68 "Table of Devices",
69};
70
71/* returns pointer to start of pos. entry of tcg log */
72static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
73{
74 loff_t i;
75 struct tpm_bios_log *log = m->private;
76 void *addr = log->bios_event_log;
77 void *limit = log->bios_event_log_end;
78 struct tcpa_event *event;
79
80 /* read over *pos measurements */
81 for (i = 0; i < *pos; i++) {
82 event = addr;
83
84 if ((addr + sizeof(struct tcpa_event)) < limit) {
85 if (event->event_type == 0 && event->event_size == 0)
86 return NULL;
87 addr += sizeof(struct tcpa_event) + event->event_size;
88 }
89 }
90
91 /* now check if current entry is valid */
92 if ((addr + sizeof(struct tcpa_event)) >= limit)
93 return NULL;
94
95 event = addr;
96
97 if ((event->event_type == 0 && event->event_size == 0) ||
98 ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
99 return NULL;
100
101 return addr;
102}
103
104static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
105 loff_t *pos)
106{
107 struct tcpa_event *event = v;
108 struct tpm_bios_log *log = m->private;
109 void *limit = log->bios_event_log_end;
110
111 v += sizeof(struct tcpa_event) + event->event_size;
112
113 /* now check if current entry is valid */
114 if ((v + sizeof(struct tcpa_event)) >= limit)
115 return NULL;
116
117 event = v;
118
119 if (event->event_type == 0 && event->event_size == 0)
120 return NULL;
121
122 if ((event->event_type == 0 && event->event_size == 0) ||
123 ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
124 return NULL;
125
126 (*pos)++;
127 return v;
128}
129
130static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
131{
132}
133
134static int get_event_name(char *dest, struct tcpa_event *event,
135 unsigned char * event_entry)
136{
137 const char *name = "";
138 /* 41 so there is room for 40 data and 1 nul */
139 char data[41] = "";
140 int i, n_len = 0, d_len = 0;
141 struct tcpa_pc_event *pc_event;
142
143 switch(event->event_type) {
144 case PREBOOT:
145 case POST_CODE:
146 case UNUSED:
147 case NO_ACTION:
148 case SCRTM_CONTENTS:
149 case SCRTM_VERSION:
150 case CPU_MICROCODE:
151 case PLATFORM_CONFIG_FLAGS:
152 case TABLE_OF_DEVICES:
153 case COMPACT_HASH:
154 case IPL:
155 case IPL_PARTITION_DATA:
156 case NONHOST_CODE:
157 case NONHOST_CONFIG:
158 case NONHOST_INFO:
159 name = tcpa_event_type_strings[event->event_type];
160 n_len = strlen(name);
161 break;
162 case SEPARATOR:
163 case ACTION:
164 if (MAX_TEXT_EVENT > event->event_size) {
165 name = event_entry;
166 n_len = event->event_size;
167 }
168 break;
169 case EVENT_TAG:
170 pc_event = (struct tcpa_pc_event *)event_entry;
171
172 /* ToDo Row data -> Base64 */
173
174 switch (pc_event->event_id) {
175 case SMBIOS:
176 case BIS_CERT:
177 case CMOS:
178 case NVRAM:
179 case OPTION_ROM_EXEC:
180 case OPTION_ROM_CONFIG:
181 case S_CRTM_VERSION:
182 name = tcpa_pc_event_id_strings[pc_event->event_id];
183 n_len = strlen(name);
184 break;
185 /* hash data */
186 case POST_BIOS_ROM:
187 case ESCD:
188 case OPTION_ROM_MICROCODE:
189 case S_CRTM_CONTENTS:
190 case POST_CONTENTS:
191 name = tcpa_pc_event_id_strings[pc_event->event_id];
192 n_len = strlen(name);
193 for (i = 0; i < 20; i++)
194 d_len += sprintf(&data[2*i], "%02x",
195 pc_event->event_data[i]);
196 break;
197 default:
198 break;
199 }
200 default:
201 break;
202 }
203
204 return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
205 n_len, name, d_len, data);
206
207}
208
209static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
210{
211 struct tcpa_event *event = v;
212 char *data = v;
213 int i;
214
215 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
216 seq_putc(m, data[i]);
217
218 return 0;
219}
220
221static int tpm_bios_measurements_release(struct inode *inode,
222 struct file *file)
223{
224 struct seq_file *seq = file->private_data;
225 struct tpm_bios_log *log = seq->private;
226
227 if (log) {
228 kfree(log->bios_event_log);
229 kfree(log);
230 }
231
232 return seq_release(inode, file);
233}
234
235static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
236{
237 int len = 0;
238 int i;
239 char *eventname;
240 struct tcpa_event *event = v;
241 unsigned char *event_entry =
242 (unsigned char *) (v + sizeof(struct tcpa_event));
243
244 eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
245 if (!eventname) {
246 printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
247 __func__);
248 return -EFAULT;
249 }
250
251 seq_printf(m, "%2d ", event->pcr_index);
252
253 /* 2nd: SHA1 */
254 for (i = 0; i < 20; i++)
255 seq_printf(m, "%02x", event->pcr_value[i]);
256
257 /* 3rd: event type identifier */
258 seq_printf(m, " %02x", event->event_type);
259
260 len += get_event_name(eventname, event, event_entry);
261
262 /* 4th: eventname <= max + \'0' delimiter */
263 seq_printf(m, " %s\n", eventname);
264
265 kfree(eventname);
266 return 0;
267}
268
269static const struct seq_operations tpm_ascii_b_measurments_seqops = {
270 .start = tpm_bios_measurements_start,
271 .next = tpm_bios_measurements_next,
272 .stop = tpm_bios_measurements_stop,
273 .show = tpm_ascii_bios_measurements_show,
274};
275
276static const struct seq_operations tpm_binary_b_measurments_seqops = {
277 .start = tpm_bios_measurements_start,
278 .next = tpm_bios_measurements_next,
279 .stop = tpm_bios_measurements_stop,
280 .show = tpm_binary_bios_measurements_show,
281};
282
283static int tpm_ascii_bios_measurements_open(struct inode *inode,
284 struct file *file)
285{
286 int err;
287 struct tpm_bios_log *log;
288 struct seq_file *seq;
289
290 log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
291 if (!log)
292 return -ENOMEM;
293
294 if ((err = read_log(log)))
295 goto out_free;
296
297 /* now register seq file */
298 err = seq_open(file, &tpm_ascii_b_measurments_seqops);
299 if (!err) {
300 seq = file->private_data;
301 seq->private = log;
302 } else {
303 goto out_free;
304 }
305
306out:
307 return err;
308out_free:
309 kfree(log->bios_event_log);
310 kfree(log);
311 goto out;
312}
313
314static const struct file_operations tpm_ascii_bios_measurements_ops = {
315 .open = tpm_ascii_bios_measurements_open,
316 .read = seq_read,
317 .llseek = seq_lseek,
318 .release = tpm_bios_measurements_release,
319};
320
321static int tpm_binary_bios_measurements_open(struct inode *inode,
322 struct file *file)
323{
324 int err;
325 struct tpm_bios_log *log;
326 struct seq_file *seq;
327
328 log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
329 if (!log)
330 return -ENOMEM;
331
332 if ((err = read_log(log)))
333 goto out_free;
334
335 /* now register seq file */
336 err = seq_open(file, &tpm_binary_b_measurments_seqops);
337 if (!err) {
338 seq = file->private_data;
339 seq->private = log;
340 } else {
341 goto out_free;
342 }
343
344out:
345 return err;
346out_free:
347 kfree(log->bios_event_log);
348 kfree(log);
349 goto out;
350}
351
352static const struct file_operations tpm_binary_bios_measurements_ops = {
353 .open = tpm_binary_bios_measurements_open,
354 .read = seq_read,
355 .llseek = seq_lseek,
356 .release = tpm_bios_measurements_release,
357};
358
359static int is_bad(void *p)
360{
361 if (!p)
362 return 1;
363 if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
364 return 1;
365 return 0;
366}
367
368struct dentry **tpm_bios_log_setup(char *name)
369{
370 struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
371
372 tpm_dir = securityfs_create_dir(name, NULL);
373 if (is_bad(tpm_dir))
374 goto out;
375
376 bin_file =
377 securityfs_create_file("binary_bios_measurements",
378 S_IRUSR | S_IRGRP, tpm_dir, NULL,
379 &tpm_binary_bios_measurements_ops);
380 if (is_bad(bin_file))
381 goto out_tpm;
382
383 ascii_file =
384 securityfs_create_file("ascii_bios_measurements",
385 S_IRUSR | S_IRGRP, tpm_dir, NULL,
386 &tpm_ascii_bios_measurements_ops);
387 if (is_bad(ascii_file))
388 goto out_bin;
389
390 ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
391 if (!ret)
392 goto out_ascii;
393
394 ret[0] = ascii_file;
395 ret[1] = bin_file;
396 ret[2] = tpm_dir;
397
398 return ret;
399
400out_ascii:
401 securityfs_remove(ascii_file);
402out_bin:
403 securityfs_remove(bin_file);
404out_tpm:
405 securityfs_remove(tpm_dir);
406out:
407 return NULL;
408}
409EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
410
411void tpm_bios_log_teardown(struct dentry **lst)
412{
413 int i;
414
415 for (i = 0; i < 3; i++)
416 securityfs_remove(lst[i]);
417}
418EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
419MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
deleted file mode 100644
index e7da086d692..00000000000
--- a/drivers/char/tpm/tpm_eventlog.h
+++ /dev/null
@@ -1,86 +0,0 @@
1
2#ifndef __TPM_EVENTLOG_H__
3#define __TPM_EVENTLOG_H__
4
5#define TCG_EVENT_NAME_LEN_MAX 255
6#define MAX_TEXT_EVENT 1000 /* Max event string length */
7#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
8
9enum bios_platform_class {
10 BIOS_CLIENT = 0x00,
11 BIOS_SERVER = 0x01,
12};
13
14struct tpm_bios_log {
15 void *bios_event_log;
16 void *bios_event_log_end;
17};
18
19struct tcpa_event {
20 u32 pcr_index;
21 u32 event_type;
22 u8 pcr_value[20]; /* SHA1 */
23 u32 event_size;
24 u8 event_data[0];
25};
26
27enum tcpa_event_types {
28 PREBOOT = 0,
29 POST_CODE,
30 UNUSED,
31 NO_ACTION,
32 SEPARATOR,
33 ACTION,
34 EVENT_TAG,
35 SCRTM_CONTENTS,
36 SCRTM_VERSION,
37 CPU_MICROCODE,
38 PLATFORM_CONFIG_FLAGS,
39 TABLE_OF_DEVICES,
40 COMPACT_HASH,
41 IPL,
42 IPL_PARTITION_DATA,
43 NONHOST_CODE,
44 NONHOST_CONFIG,
45 NONHOST_INFO,
46};
47
48struct tcpa_pc_event {
49 u32 event_id;
50 u32 event_size;
51 u8 event_data[0];
52};
53
54enum tcpa_pc_event_ids {
55 SMBIOS = 1,
56 BIS_CERT,
57 POST_BIOS_ROM,
58 ESCD,
59 CMOS,
60 NVRAM,
61 OPTION_ROM_EXEC,
62 OPTION_ROM_CONFIG,
63 OPTION_ROM_MICROCODE = 10,
64 S_CRTM_VERSION,
65 S_CRTM_CONTENTS,
66 POST_CONTENTS,
67 HOST_TABLE_OF_DEVICES,
68};
69
70int read_log(struct tpm_bios_log *log);
71
72#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
73 defined(CONFIG_ACPI)
74extern struct dentry **tpm_bios_log_setup(char *);
75extern void tpm_bios_log_teardown(struct dentry **);
76#else
77static inline struct dentry **tpm_bios_log_setup(char *name)
78{
79 return NULL;
80}
81static inline void tpm_bios_log_teardown(struct dentry **dir)
82{
83}
84#endif
85
86#endif
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
deleted file mode 100644
index fb447bd0cb6..00000000000
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ /dev/null
@@ -1,695 +0,0 @@
1/*
2 * Copyright (C) 2012 Infineon Technologies
3 *
4 * Authors:
5 * Peter Huewe <peter.huewe@infineon.com>
6 *
7 * Device driver for TCG/TCPA TPM (trusted platform module).
8 * Specifications at www.trustedcomputinggroup.org
9 *
10 * This device driver implements the TPM interface as defined in
11 * the TCG TPM Interface Spec version 1.2, revision 1.0 and the
12 * Infineon I2C Protocol Stack Specification v0.20.
13 *
14 * It is based on the original tpm_tis device driver from Leendert van
15 * Dorn and Kyleen Hall.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation, version 2 of the
20 * License.
21 *
22 *
23 */
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/wait.h>
29#include "tpm.h"
30
31/* max. buffer size supported by our TPM */
32#define TPM_BUFSIZE 1260
33
34/* max. number of iterations after I2C NAK */
35#define MAX_COUNT 3
36
37#define SLEEP_DURATION_LOW 55
38#define SLEEP_DURATION_HI 65
39
40/* max. number of iterations after I2C NAK for 'long' commands
41 * we need this especially for sending TPM_READY, since the cleanup after the
 42 * transition to the ready state may take some time, but it is unpredictable
43 * how long it will take.
44 */
45#define MAX_COUNT_LONG 50
46
47#define SLEEP_DURATION_LONG_LOW 200
48#define SLEEP_DURATION_LONG_HI 220
49
50/* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */
51#define SLEEP_DURATION_RESET_LOW 2400
52#define SLEEP_DURATION_RESET_HI 2600
53
54/* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */
55#define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000)
56#define TPM_TIMEOUT_US_HI (TPM_TIMEOUT_US_LOW + 2000)
57
58/* expected value for DIDVID register */
59#define TPM_TIS_I2C_DID_VID 0x000b15d1L
60
61/* Structure to store I2C TPM specific stuff */
62struct tpm_inf_dev {
63 struct i2c_client *client;
64 u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
65 struct tpm_chip *chip;
66};
67
68static struct tpm_inf_dev tpm_dev;
69static struct i2c_driver tpm_tis_i2c_driver;
70
71/*
72 * iic_tpm_read() - read from TPM register
73 * @addr: register address to read from
74 * @buffer: provided by caller
75 * @len: number of bytes to read
76 *
77 * Read len bytes from TPM register and put them into
78 * buffer (little-endian format, i.e. first byte is put into buffer[0]).
79 *
80 * NOTE: TPM is big-endian for multi-byte values. Multi-byte
81 * values have to be swapped.
82 *
83 * NOTE: We can't unfortunately use the combined read/write functions
84 * provided by the i2c core as the TPM currently does not support the
 85 * repeated start condition and due to its special requirements.
86 * The i2c_smbus* functions do not work for this chip.
87 *
88 * Return -EIO on error, 0 on success.
89 */
90static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
91{
92
93 struct i2c_msg msg1 = { tpm_dev.client->addr, 0, 1, &addr };
94 struct i2c_msg msg2 = { tpm_dev.client->addr, I2C_M_RD, len, buffer };
95
96 int rc;
97 int count;
98
99 /* Lock the adapter for the duration of the whole sequence. */
100 if (!tpm_dev.client->adapter->algo->master_xfer)
101 return -EOPNOTSUPP;
102 i2c_lock_adapter(tpm_dev.client->adapter);
103
104 for (count = 0; count < MAX_COUNT; count++) {
105 rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
106 if (rc > 0)
107 break; /* break here to skip sleep */
108
109 usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
110 }
111
112 if (rc <= 0)
113 goto out;
114
115 /* After the TPM has successfully received the register address it needs
116 * some time, thus we're sleeping here again, before retrieving the data
117 */
118 for (count = 0; count < MAX_COUNT; count++) {
119 usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
120 rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1);
121 if (rc > 0)
122 break;
123
124 }
125
126out:
127 i2c_unlock_adapter(tpm_dev.client->adapter);
128 if (rc <= 0)
129 return -EIO;
130
131 return 0;
132}
133
134static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
135 unsigned int sleep_low,
136 unsigned int sleep_hi, u8 max_count)
137{
138 int rc = -EIO;
139 int count;
140
141 struct i2c_msg msg1 = { tpm_dev.client->addr, 0, len + 1, tpm_dev.buf };
142
143 if (len > TPM_BUFSIZE)
144 return -EINVAL;
145
146 if (!tpm_dev.client->adapter->algo->master_xfer)
147 return -EOPNOTSUPP;
148 i2c_lock_adapter(tpm_dev.client->adapter);
149
150 /* prepend the 'register address' to the buffer */
151 tpm_dev.buf[0] = addr;
152 memcpy(&(tpm_dev.buf[1]), buffer, len);
153
154 /*
155 * NOTE: We have to use these special mechanisms here and unfortunately
156 * cannot rely on the standard behavior of i2c_transfer.
157 */
158 for (count = 0; count < max_count; count++) {
159 rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
160 if (rc > 0)
161 break;
162
163 usleep_range(sleep_low, sleep_hi);
164 }
165
166 i2c_unlock_adapter(tpm_dev.client->adapter);
167 if (rc <= 0)
168 return -EIO;
169
170 return 0;
171}
172
173/*
174 * iic_tpm_write() - write to TPM register
175 * @addr: register address to write to
176 * @buffer: containing data to be written
177 * @len: number of bytes to write
178 *
179 * Write len bytes from provided buffer to TPM register (little
180 * endian format, i.e. buffer[0] is written as first byte).
181 *
182 * NOTE: TPM is big-endian for multi-byte values. Multi-byte
183 * values have to be swapped.
184 *
185 * NOTE: use this function instead of the iic_tpm_write_generic function.
186 *
187 * Return -EIO on error, 0 on success
188 */
189static int iic_tpm_write(u8 addr, u8 *buffer, size_t len)
190{
191 return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW,
192 SLEEP_DURATION_HI, MAX_COUNT);
193}
194
195/*
196 * This function is needed especially for the cleanup situation after
197 * sending TPM_READY
198 * */
199static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len)
200{
201 return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW,
202 SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG);
203}
204
205enum tis_access {
206 TPM_ACCESS_VALID = 0x80,
207 TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
208 TPM_ACCESS_REQUEST_PENDING = 0x04,
209 TPM_ACCESS_REQUEST_USE = 0x02,
210};
211
212enum tis_status {
213 TPM_STS_VALID = 0x80,
214 TPM_STS_COMMAND_READY = 0x40,
215 TPM_STS_GO = 0x20,
216 TPM_STS_DATA_AVAIL = 0x10,
217 TPM_STS_DATA_EXPECT = 0x08,
218};
219
220enum tis_defaults {
221 TIS_SHORT_TIMEOUT = 750, /* ms */
222 TIS_LONG_TIMEOUT = 2000, /* 2 sec */
223};
224
225#define TPM_ACCESS(l) (0x0000 | ((l) << 4))
226#define TPM_STS(l) (0x0001 | ((l) << 4))
227#define TPM_DATA_FIFO(l) (0x0005 | ((l) << 4))
228#define TPM_DID_VID(l) (0x0006 | ((l) << 4))
229
230static int check_locality(struct tpm_chip *chip, int loc)
231{
232 u8 buf;
233 int rc;
234
235 rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1);
236 if (rc < 0)
237 return rc;
238
239 if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
240 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
241 chip->vendor.locality = loc;
242 return loc;
243 }
244
245 return -EIO;
246}
247
248/* implementation similar to tpm_tis */
249static void release_locality(struct tpm_chip *chip, int loc, int force)
250{
251 u8 buf;
252 if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0)
253 return;
254
255 if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
256 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) {
257 buf = TPM_ACCESS_ACTIVE_LOCALITY;
258 iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
259 }
260}
261
262static int request_locality(struct tpm_chip *chip, int loc)
263{
264 unsigned long stop;
265 u8 buf = TPM_ACCESS_REQUEST_USE;
266
267 if (check_locality(chip, loc) >= 0)
268 return loc;
269
270 iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
271
272 /* wait for burstcount */
273 stop = jiffies + chip->vendor.timeout_a;
274 do {
275 if (check_locality(chip, loc) >= 0)
276 return loc;
277 usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
278 } while (time_before(jiffies, stop));
279
280 return -ETIME;
281}
282
283static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
284{
285 /* NOTE: since I2C read may fail, return 0 in this case --> time-out */
286 u8 buf;
287 if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0)
288 return 0;
289 else
290 return buf;
291}
292
293static void tpm_tis_i2c_ready(struct tpm_chip *chip)
294{
295 /* this causes the current command to be aborted */
296 u8 buf = TPM_STS_COMMAND_READY;
297 iic_tpm_write_long(TPM_STS(chip->vendor.locality), &buf, 1);
298}
299
300static ssize_t get_burstcount(struct tpm_chip *chip)
301{
302 unsigned long stop;
303 ssize_t burstcnt;
304 u8 buf[3];
305
306 /* wait for burstcount */
307 /* which timeout value, spec has 2 answers (c & d) */
308 stop = jiffies + chip->vendor.timeout_d;
309 do {
310 /* Note: STS is little endian */
311 if (iic_tpm_read(TPM_STS(chip->vendor.locality)+1, buf, 3) < 0)
312 burstcnt = 0;
313 else
314 burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0];
315
316 if (burstcnt)
317 return burstcnt;
318
319 usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
320 } while (time_before(jiffies, stop));
321 return -EBUSY;
322}
323
324static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
325 int *status)
326{
327 unsigned long stop;
328
329 /* check current status */
330 *status = tpm_tis_i2c_status(chip);
331 if ((*status & mask) == mask)
332 return 0;
333
334 stop = jiffies + timeout;
335 do {
336 /* since we just checked the status, give the TPM some time */
337 usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
338 *status = tpm_tis_i2c_status(chip);
339 if ((*status & mask) == mask)
340 return 0;
341
342 } while (time_before(jiffies, stop));
343
344 return -ETIME;
345}
346
347static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
348{
349 size_t size = 0;
350 ssize_t burstcnt;
351 u8 retries = 0;
352 int rc;
353
354 while (size < count) {
355 burstcnt = get_burstcount(chip);
356
357 /* burstcnt < 0 = TPM is busy */
358 if (burstcnt < 0)
359 return burstcnt;
360
361 /* limit received data to max. left */
362 if (burstcnt > (count - size))
363 burstcnt = count - size;
364
365 rc = iic_tpm_read(TPM_DATA_FIFO(chip->vendor.locality),
366 &(buf[size]), burstcnt);
367 if (rc == 0)
368 size += burstcnt;
369 else if (rc < 0)
370 retries++;
371
372 /* avoid endless loop in case of broken HW */
373 if (retries > MAX_COUNT_LONG)
374 return -EIO;
375
376 }
377 return size;
378}
379
380static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
381{
382 int size = 0;
383 int expected, status;
384
385 if (count < TPM_HEADER_SIZE) {
386 size = -EIO;
387 goto out;
388 }
389
390 /* read first 10 bytes, including tag, paramsize, and result */
391 size = recv_data(chip, buf, TPM_HEADER_SIZE);
392 if (size < TPM_HEADER_SIZE) {
393 dev_err(chip->dev, "Unable to read header\n");
394 goto out;
395 }
396
397 expected = be32_to_cpu(*(__be32 *)(buf + 2));
398 if ((size_t) expected > count) {
399 size = -EIO;
400 goto out;
401 }
402
403 size += recv_data(chip, &buf[TPM_HEADER_SIZE],
404 expected - TPM_HEADER_SIZE);
405 if (size < expected) {
406 dev_err(chip->dev, "Unable to read remainder of result\n");
407 size = -ETIME;
408 goto out;
409 }
410
411 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
412 if (status & TPM_STS_DATA_AVAIL) { /* retry? */
413 dev_err(chip->dev, "Error left over data\n");
414 size = -EIO;
415 goto out;
416 }
417
418out:
419 tpm_tis_i2c_ready(chip);
420 /* The TPM needs some time to clean up here,
421 * so we sleep rather than keeping the bus busy
422 */
423 usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
424 release_locality(chip, chip->vendor.locality, 0);
425 return size;
426}
427
428static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
429{
430 int rc, status;
431 ssize_t burstcnt;
432 size_t count = 0;
433 u8 retries = 0;
434 u8 sts = TPM_STS_GO;
435
436 if (len > TPM_BUFSIZE)
437 return -E2BIG; /* command is too long for our tpm, sorry */
438
439 if (request_locality(chip, 0) < 0)
440 return -EBUSY;
441
442 status = tpm_tis_i2c_status(chip);
443 if ((status & TPM_STS_COMMAND_READY) == 0) {
444 tpm_tis_i2c_ready(chip);
445 if (wait_for_stat
446 (chip, TPM_STS_COMMAND_READY,
447 chip->vendor.timeout_b, &status) < 0) {
448 rc = -ETIME;
449 goto out_err;
450 }
451 }
452
453 while (count < len - 1) {
454 burstcnt = get_burstcount(chip);
455
456 /* burstcnt < 0 = TPM is busy */
457 if (burstcnt < 0)
458 return burstcnt;
459
460 if (burstcnt > (len - 1 - count))
461 burstcnt = len - 1 - count;
462
463 rc = iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality),
464 &(buf[count]), burstcnt);
465 if (rc == 0)
466 count += burstcnt;
467 else if (rc < 0)
468 retries++;
469
470 /* avoid endless loop in case of broken HW */
471 if (retries > MAX_COUNT_LONG) {
472 rc = -EIO;
473 goto out_err;
474 }
475
476 wait_for_stat(chip, TPM_STS_VALID,
477 chip->vendor.timeout_c, &status);
478
479 if ((status & TPM_STS_DATA_EXPECT) == 0) {
480 rc = -EIO;
481 goto out_err;
482 }
483
484 }
485
486 /* write last byte */
487 iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), &(buf[count]), 1);
488 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
489 if ((status & TPM_STS_DATA_EXPECT) != 0) {
490 rc = -EIO;
491 goto out_err;
492 }
493
494 /* go and do it */
495 iic_tpm_write(TPM_STS(chip->vendor.locality), &sts, 1);
496
497 return len;
498out_err:
499 tpm_tis_i2c_ready(chip);
500 /* The TPM needs some time to clean up here,
501 * so we sleep rather than keeping the bus busy
502 */
503 usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
504 release_locality(chip, chip->vendor.locality, 0);
505 return rc;
506}
507
508static const struct file_operations tis_ops = {
509 .owner = THIS_MODULE,
510 .llseek = no_llseek,
511 .open = tpm_open,
512 .read = tpm_read,
513 .write = tpm_write,
514 .release = tpm_release,
515};
516
517static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
518static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
519static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
520static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
521static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
522static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
523static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
524static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
525static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
526static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
527
528static struct attribute *tis_attrs[] = {
529 &dev_attr_pubek.attr,
530 &dev_attr_pcrs.attr,
531 &dev_attr_enabled.attr,
532 &dev_attr_active.attr,
533 &dev_attr_owned.attr,
534 &dev_attr_temp_deactivated.attr,
535 &dev_attr_caps.attr,
536 &dev_attr_cancel.attr,
537 &dev_attr_durations.attr,
538 &dev_attr_timeouts.attr,
539 NULL,
540};
541
542static struct attribute_group tis_attr_grp = {
543 .attrs = tis_attrs
544};
545
546static struct tpm_vendor_specific tpm_tis_i2c = {
547 .status = tpm_tis_i2c_status,
548 .recv = tpm_tis_i2c_recv,
549 .send = tpm_tis_i2c_send,
550 .cancel = tpm_tis_i2c_ready,
551 .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
552 .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
553 .req_canceled = TPM_STS_COMMAND_READY,
554 .attr_group = &tis_attr_grp,
555 .miscdev.fops = &tis_ops,
556};
557
558static int tpm_tis_i2c_init(struct device *dev)
559{
560 u32 vendor;
561 int rc = 0;
562 struct tpm_chip *chip;
563
564 chip = tpm_register_hardware(dev, &tpm_tis_i2c);
565 if (!chip) {
566 rc = -ENODEV;
567 goto out_err;
568 }
569
570 /* Disable interrupts */
571 chip->vendor.irq = 0;
572
573 /* Default timeouts */
574 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
575 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
576 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
577 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
578
579 if (request_locality(chip, 0) != 0) {
580 rc = -ENODEV;
581 goto out_vendor;
582 }
583
584 /* read four bytes from DID_VID register */
585 if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) {
586 rc = -EIO;
587 goto out_release;
588 }
589
590 /* create DID_VID register value, after swapping to little-endian */
591 vendor = be32_to_cpu((__be32) vendor);
592
593 if (vendor != TPM_TIS_I2C_DID_VID) {
594 rc = -ENODEV;
595 goto out_release;
596 }
597
598 dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16);
599
600 INIT_LIST_HEAD(&chip->vendor.list);
601 tpm_dev.chip = chip;
602
603 tpm_get_timeouts(chip);
604 tpm_do_selftest(chip);
605
606 return 0;
607
608out_release:
609 release_locality(chip, chip->vendor.locality, 1);
610
611out_vendor:
612 /* close file handles */
613 tpm_dev_vendor_release(chip);
614
615 /* remove hardware */
616 tpm_remove_hardware(chip->dev);
617
618 /* reset these pointers, otherwise we oops */
619 chip->dev->release = NULL;
620 chip->release = NULL;
621 tpm_dev.client = NULL;
622 dev_set_drvdata(chip->dev, chip);
623out_err:
624 return rc;
625}
626
627static const struct i2c_device_id tpm_tis_i2c_table[] = {
628 {"tpm_i2c_infineon", 0},
629 {},
630};
631
632MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
633static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
634
635static int tpm_tis_i2c_probe(struct i2c_client *client,
636 const struct i2c_device_id *id)
637{
638 int rc;
639 if (tpm_dev.client != NULL)
640 return -EBUSY; /* We only support one client */
641
642 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
643 dev_err(&client->dev,
644 "no algorithms associated to the i2c bus\n");
645 return -ENODEV;
646 }
647
648 client->driver = &tpm_tis_i2c_driver;
649 tpm_dev.client = client;
650 rc = tpm_tis_i2c_init(&client->dev);
651 if (rc != 0) {
652 client->driver = NULL;
653 tpm_dev.client = NULL;
654 rc = -ENODEV;
655 }
656 return rc;
657}
658
659static int tpm_tis_i2c_remove(struct i2c_client *client)
660{
661 struct tpm_chip *chip = tpm_dev.chip;
662 release_locality(chip, chip->vendor.locality, 1);
663
664 /* close file handles */
665 tpm_dev_vendor_release(chip);
666
667 /* remove hardware */
668 tpm_remove_hardware(chip->dev);
669
670 /* reset these pointers, otherwise we oops */
671 chip->dev->release = NULL;
672 chip->release = NULL;
673 tpm_dev.client = NULL;
674 dev_set_drvdata(chip->dev, chip);
675
676 return 0;
677}
678
679static struct i2c_driver tpm_tis_i2c_driver = {
680
681 .id_table = tpm_tis_i2c_table,
682 .probe = tpm_tis_i2c_probe,
683 .remove = tpm_tis_i2c_remove,
684 .driver = {
685 .name = "tpm_i2c_infineon",
686 .owner = THIS_MODULE,
687 .pm = &tpm_tis_i2c_ops,
688 },
689};
690
691module_i2c_driver(tpm_tis_i2c_driver);
692MODULE_AUTHOR("Peter Huewe <peter.huewe@infineon.com>");
693MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver");
694MODULE_VERSION("2.1.5");
695MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
deleted file mode 100644
index 9978609d93b..00000000000
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ /dev/null
@@ -1,724 +0,0 @@
1/*
2 * Copyright (C) 2012 IBM Corporation
3 *
4 * Author: Ashley Lai <adlai@us.ibm.com>
5 *
6 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
7 *
8 * Device driver for TCG/TCPA TPM (trusted platform module).
9 * Specifications at www.trustedcomputinggroup.org
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation, version 2 of the
14 * License.
15 *
16 */
17
18#include <linux/dma-mapping.h>
19#include <linux/dmapool.h>
20#include <linux/slab.h>
21#include <asm/vio.h>
22#include <asm/irq.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/spinlock.h>
26#include <linux/interrupt.h>
27#include <linux/wait.h>
28#include <asm/prom.h>
29
30#include "tpm.h"
31#include "tpm_ibmvtpm.h"
32
33static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
34
35static struct vio_device_id tpm_ibmvtpm_device_table[] = {
36 { "IBM,vtpm", "IBM,vtpm"},
37 { "", "" }
38};
39MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
40
41/**
42 * ibmvtpm_send_crq - Send a CRQ request
43 * @vdev: vio device struct
44 * @w1: first word
45 * @w2: second word
46 *
47 * Return value:
 48 * 0 - Success
49 * Non-zero - Failure
50 */
51static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
52{
53 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
54}
55
56/**
57 * ibmvtpm_get_data - Retrieve ibm vtpm data
58 * @dev: device struct
59 *
60 * Return value:
61 * vtpm device struct
62 */
63static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
64{
65 struct tpm_chip *chip = dev_get_drvdata(dev);
66 if (chip)
67 return (struct ibmvtpm_dev *)chip->vendor.data;
68 return NULL;
69}
70
71/**
72 * tpm_ibmvtpm_recv - Receive data after send
73 * @chip: tpm chip struct
74 * @buf: buffer to read
 75 * @count: size of buffer
76 *
77 * Return value:
78 * Number of bytes read
79 */
80static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
81{
82 struct ibmvtpm_dev *ibmvtpm;
83 u16 len;
84 int sig;
85
86 ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
87
88 if (!ibmvtpm->rtce_buf) {
89 dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
90 return 0;
91 }
92
93 sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
94 if (sig)
95 return -EINTR;
96
97 len = ibmvtpm->res_len;
98
99 if (count < len) {
100 dev_err(ibmvtpm->dev,
101 "Invalid size in recv: count=%ld, crq_size=%d\n",
102 count, len);
103 return -EIO;
104 }
105
106 spin_lock(&ibmvtpm->rtce_lock);
107 memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
108 memset(ibmvtpm->rtce_buf, 0, len);
109 ibmvtpm->res_len = 0;
110 spin_unlock(&ibmvtpm->rtce_lock);
111 return len;
112}
113
114/**
115 * tpm_ibmvtpm_send - Send tpm request
116 * @chip: tpm chip struct
 117 * @buf: buffer containing the data to send
 118 * @count: size of buffer
119 *
120 * Return value:
121 * Number of bytes sent
122 */
123static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
124{
125 struct ibmvtpm_dev *ibmvtpm;
126 struct ibmvtpm_crq crq;
127 u64 *word = (u64 *) &crq;
128 int rc;
129
130 ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
131
132 if (!ibmvtpm->rtce_buf) {
133 dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
134 return 0;
135 }
136
137 if (count > ibmvtpm->rtce_size) {
138 dev_err(ibmvtpm->dev,
139 "Invalid size in send: count=%ld, rtce_size=%d\n",
140 count, ibmvtpm->rtce_size);
141 return -EIO;
142 }
143
144 spin_lock(&ibmvtpm->rtce_lock);
145 memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
146 crq.valid = (u8)IBMVTPM_VALID_CMD;
147 crq.msg = (u8)VTPM_TPM_COMMAND;
148 crq.len = (u16)count;
149 crq.data = ibmvtpm->rtce_dma_handle;
150
151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
152 if (rc != H_SUCCESS) {
153 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
154 rc = 0;
155 } else
156 rc = count;
157
158 spin_unlock(&ibmvtpm->rtce_lock);
159 return rc;
160}
161
162static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
163{
164 return;
165}
166
167static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
168{
169 return 0;
170}
171
172/**
173 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
174 * @ibmvtpm: vtpm device struct
175 *
176 * Return value:
177 * 0 - Success
178 * Non-zero - Failure
179 */
180static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
181{
182 struct ibmvtpm_crq crq;
183 u64 *buf = (u64 *) &crq;
184 int rc;
185
186 crq.valid = (u8)IBMVTPM_VALID_CMD;
187 crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
188
189 rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
190 if (rc != H_SUCCESS)
191 dev_err(ibmvtpm->dev,
192 "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
193
194 return rc;
195}
196
197/**
198 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
199 * - Note that this is vtpm version and not tpm version
200 * @ibmvtpm: vtpm device struct
201 *
202 * Return value:
203 * 0 - Success
204 * Non-zero - Failure
205 */
206static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
207{
208 struct ibmvtpm_crq crq;
209 u64 *buf = (u64 *) &crq;
210 int rc;
211
212 crq.valid = (u8)IBMVTPM_VALID_CMD;
213 crq.msg = (u8)VTPM_GET_VERSION;
214
215 rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
216 if (rc != H_SUCCESS)
217 dev_err(ibmvtpm->dev,
218 "ibmvtpm_crq_get_version failed rc=%d\n", rc);
219
220 return rc;
221}
222
223/**
224 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
225 * @ibmvtpm: vtpm device struct
226 *
227 * Return value:
228 * 0 - Success
229 * Non-zero - Failure
230 */
231static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
232{
233 int rc;
234
235 rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
236 if (rc != H_SUCCESS)
237 dev_err(ibmvtpm->dev,
238 "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
239
240 return rc;
241}
242
243/**
244 * ibmvtpm_crq_send_init - Send a CRQ initialize message
245 * @ibmvtpm: vtpm device struct
246 *
247 * Return value:
248 * 0 - Success
249 * Non-zero - Failure
250 */
251static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
252{
253 int rc;
254
255 rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
256 if (rc != H_SUCCESS)
257 dev_err(ibmvtpm->dev,
258 "ibmvtpm_crq_send_init failed rc=%d\n", rc);
259
260 return rc;
261}
262
263/**
264 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
265 * @vdev: vio device struct
266 *
267 * Return value:
268 * 0
269 */
270static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
271{
272 struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
273 int rc = 0;
274
275 free_irq(vdev->irq, ibmvtpm);
276
277 do {
278 if (rc)
279 msleep(100);
280 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
281 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
282
283 dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
284 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
285 free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
286
287 if (ibmvtpm->rtce_buf) {
288 dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
289 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
290 kfree(ibmvtpm->rtce_buf);
291 }
292
293 tpm_remove_hardware(ibmvtpm->dev);
294
295 kfree(ibmvtpm);
296
297 return 0;
298}
299
300/**
301 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
302 * @vdev: vio device struct
303 *
304 * Return value:
305 * Number of bytes the driver needs to DMA map
306 */
307static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
308{
309 struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
310 return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
311}
312
313/**
314 * tpm_ibmvtpm_suspend - Suspend
315 * @dev: device struct
316 *
317 * Return value:
318 * 0
319 */
320static int tpm_ibmvtpm_suspend(struct device *dev)
321{
322 struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
323 struct ibmvtpm_crq crq;
324 u64 *buf = (u64 *) &crq;
325 int rc = 0;
326
327 crq.valid = (u8)IBMVTPM_VALID_CMD;
328 crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
329
330 rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
331 if (rc != H_SUCCESS)
332 dev_err(ibmvtpm->dev,
333 "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
334
335 return rc;
336}
337
338/**
339 * ibmvtpm_reset_crq - Reset CRQ
340 * @ibmvtpm: ibm vtpm struct
341 *
342 * Return value:
343 * 0 - Success
344 * Non-zero - Failure
345 */
346static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
347{
348 int rc = 0;
349
350 do {
351 if (rc)
352 msleep(100);
353 rc = plpar_hcall_norets(H_FREE_CRQ,
354 ibmvtpm->vdev->unit_address);
355 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
356
357 memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
358 ibmvtpm->crq_queue.index = 0;
359
360 return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
361 ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
362}
363
364/**
365 * tpm_ibmvtpm_resume - Resume from suspend
366 * @dev: device struct
367 *
368 * Return value:
369 * 0
370 */
371static int tpm_ibmvtpm_resume(struct device *dev)
372{
373 struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
374 int rc = 0;
375
376 do {
377 if (rc)
378 msleep(100);
379 rc = plpar_hcall_norets(H_ENABLE_CRQ,
380 ibmvtpm->vdev->unit_address);
381 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
382
383 if (rc) {
384 dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
385 return rc;
386 }
387
388 rc = vio_enable_interrupts(ibmvtpm->vdev);
389 if (rc) {
390 dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
391 return rc;
392 }
393
394 rc = ibmvtpm_crq_send_init(ibmvtpm);
395 if (rc)
396 dev_err(dev, "Error send_init rc=%d\n", rc);
397
398 return rc;
399}
400
401static const struct file_operations ibmvtpm_ops = {
402 .owner = THIS_MODULE,
403 .llseek = no_llseek,
404 .open = tpm_open,
405 .read = tpm_read,
406 .write = tpm_write,
407 .release = tpm_release,
408};
409
410static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
411static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
412static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
413static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
414static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
415static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
416 NULL);
417static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
418static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
419static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
420static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
421
422static struct attribute *ibmvtpm_attrs[] = {
423 &dev_attr_pubek.attr,
424 &dev_attr_pcrs.attr,
425 &dev_attr_enabled.attr,
426 &dev_attr_active.attr,
427 &dev_attr_owned.attr,
428 &dev_attr_temp_deactivated.attr,
429 &dev_attr_caps.attr,
430 &dev_attr_cancel.attr,
431 &dev_attr_durations.attr,
432 &dev_attr_timeouts.attr, NULL,
433};
434
435static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };
436
437static const struct tpm_vendor_specific tpm_ibmvtpm = {
438 .recv = tpm_ibmvtpm_recv,
439 .send = tpm_ibmvtpm_send,
440 .cancel = tpm_ibmvtpm_cancel,
441 .status = tpm_ibmvtpm_status,
442 .req_complete_mask = 0,
443 .req_complete_val = 0,
444 .req_canceled = 0,
445 .attr_group = &ibmvtpm_attr_grp,
446 .miscdev = { .fops = &ibmvtpm_ops, },
447};
448
449static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
450 .suspend = tpm_ibmvtpm_suspend,
451 .resume = tpm_ibmvtpm_resume,
452};
453
454/**
455 * ibmvtpm_crq_get_next - Get next responded crq
 456 * @ibmvtpm: vtpm device struct
457 *
458 * Return value:
459 * vtpm crq pointer
460 */
461static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
462{
463 struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
464 struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];
465
466 if (crq->valid & VTPM_MSG_RES) {
467 if (++crq_q->index == crq_q->num_entry)
468 crq_q->index = 0;
469 smp_rmb();
470 } else
471 crq = NULL;
472 return crq;
473}
474
475/**
476 * ibmvtpm_crq_process - Process responded crq
 477 * @crq: crq to be processed
 478 * @ibmvtpm: vtpm device struct
479 *
480 * Return value:
481 * Nothing
482 */
483static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
484 struct ibmvtpm_dev *ibmvtpm)
485{
486 int rc = 0;
487
488 switch (crq->valid) {
489 case VALID_INIT_CRQ:
490 switch (crq->msg) {
491 case INIT_CRQ_RES:
492 dev_info(ibmvtpm->dev, "CRQ initialized\n");
493 rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
494 if (rc)
495 dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
496 return;
497 case INIT_CRQ_COMP_RES:
498 dev_info(ibmvtpm->dev,
499 "CRQ initialization completed\n");
500 return;
501 default:
502 dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
503 return;
504 }
505 return;
506 case IBMVTPM_VALID_CMD:
507 switch (crq->msg) {
508 case VTPM_GET_RTCE_BUFFER_SIZE_RES:
509 if (crq->len <= 0) {
510 dev_err(ibmvtpm->dev, "Invalid rtce size\n");
511 return;
512 }
513 ibmvtpm->rtce_size = crq->len;
514 ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
515 GFP_KERNEL);
516 if (!ibmvtpm->rtce_buf) {
517 dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
518 return;
519 }
520
521 ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
522 ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
523 DMA_BIDIRECTIONAL);
524
525 if (dma_mapping_error(ibmvtpm->dev,
526 ibmvtpm->rtce_dma_handle)) {
527 kfree(ibmvtpm->rtce_buf);
528 ibmvtpm->rtce_buf = NULL;
529 dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
530 }
531
532 return;
533 case VTPM_GET_VERSION_RES:
534 ibmvtpm->vtpm_version = crq->data;
535 return;
536 case VTPM_TPM_COMMAND_RES:
537 /* len of the data in rtce buffer */
538 ibmvtpm->res_len = crq->len;
539 wake_up_interruptible(&ibmvtpm->wq);
540 return;
541 default:
542 return;
543 }
544 }
545 return;
546}
547
548/**
549 * ibmvtpm_interrupt - Interrupt handler
550 * @irq: irq number to handle
551 * @vtpm_instance: vtpm that received interrupt
552 *
553 * Returns:
554 * IRQ_HANDLED
555 **/
556static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
557{
558 struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
559 struct ibmvtpm_crq *crq;
560
561 /* while loop is needed for initial setup (get version and
562 * get rtce_size). There should be only one tpm request at any
563 * given time.
564 */
565 while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
566 ibmvtpm_crq_process(crq, ibmvtpm);
567 crq->valid = 0;
568 smp_wmb();
569 }
570
571 return IRQ_HANDLED;
572}
573
574/**
575 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
576 * @vio_dev: vio device struct
577 * @id: vio device id struct
578 *
579 * Return value:
580 * 0 - Success
581 * Non-zero - Failure
582 */
583static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
584 const struct vio_device_id *id)
585{
586 struct ibmvtpm_dev *ibmvtpm;
587 struct device *dev = &vio_dev->dev;
588 struct ibmvtpm_crq_queue *crq_q;
589 struct tpm_chip *chip;
590 int rc = -ENOMEM, rc1;
591
592 chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
593 if (!chip) {
594 dev_err(dev, "tpm_register_hardware failed\n");
595 return -ENODEV;
596 }
597
598 ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
599 if (!ibmvtpm) {
600 dev_err(dev, "kzalloc for ibmvtpm failed\n");
601 goto cleanup;
602 }
603
604 crq_q = &ibmvtpm->crq_queue;
605 crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
606 if (!crq_q->crq_addr) {
607 dev_err(dev, "Unable to allocate memory for crq_addr\n");
608 goto cleanup;
609 }
610
611 crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
612 ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
613 CRQ_RES_BUF_SIZE,
614 DMA_BIDIRECTIONAL);
615
616 if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
617 dev_err(dev, "dma mapping failed\n");
618 goto cleanup;
619 }
620
621 rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
622 ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
623 if (rc == H_RESOURCE)
624 rc = ibmvtpm_reset_crq(ibmvtpm);
625
626 if (rc) {
627 dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
628 goto reg_crq_cleanup;
629 }
630
631 rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
632 tpm_ibmvtpm_driver_name, ibmvtpm);
633 if (rc) {
634 dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
635 goto init_irq_cleanup;
636 }
637
638 rc = vio_enable_interrupts(vio_dev);
639 if (rc) {
640 dev_err(dev, "Error %d enabling interrupts\n", rc);
641 goto init_irq_cleanup;
642 }
643
644 init_waitqueue_head(&ibmvtpm->wq);
645
646 crq_q->index = 0;
647
648 ibmvtpm->dev = dev;
649 ibmvtpm->vdev = vio_dev;
650 chip->vendor.data = (void *)ibmvtpm;
651
652 spin_lock_init(&ibmvtpm->rtce_lock);
653
654 rc = ibmvtpm_crq_send_init(ibmvtpm);
655 if (rc)
656 goto init_irq_cleanup;
657
658 rc = ibmvtpm_crq_get_version(ibmvtpm);
659 if (rc)
660 goto init_irq_cleanup;
661
662 rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
663 if (rc)
664 goto init_irq_cleanup;
665
666 return rc;
667init_irq_cleanup:
668 do {
669 rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
670 } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
671reg_crq_cleanup:
672 dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
673 DMA_BIDIRECTIONAL);
674cleanup:
675 if (ibmvtpm) {
676 if (crq_q->crq_addr)
677 free_page((unsigned long)crq_q->crq_addr);
678 kfree(ibmvtpm);
679 }
680
681 tpm_remove_hardware(dev);
682
683 return rc;
684}
685
686static struct vio_driver ibmvtpm_driver = {
687 .id_table = tpm_ibmvtpm_device_table,
688 .probe = tpm_ibmvtpm_probe,
689 .remove = tpm_ibmvtpm_remove,
690 .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
691 .name = tpm_ibmvtpm_driver_name,
692 .pm = &tpm_ibmvtpm_pm_ops,
693};
694
695/**
696 * ibmvtpm_module_init - Initialize ibm vtpm module
697 *
698 * Return value:
 699 * 0 - Success
700 * Non-zero - Failure
701 */
702static int __init ibmvtpm_module_init(void)
703{
704 return vio_register_driver(&ibmvtpm_driver);
705}
706
707/**
708 * ibmvtpm_module_exit - Teardown ibm vtpm module
709 *
710 * Return value:
711 * Nothing
712 */
713static void __exit ibmvtpm_module_exit(void)
714{
715 vio_unregister_driver(&ibmvtpm_driver);
716}
717
718module_init(ibmvtpm_module_init);
719module_exit(ibmvtpm_module_exit);
720
721MODULE_AUTHOR("adlai@us.ibm.com");
722MODULE_DESCRIPTION("IBM vTPM Driver");
723MODULE_VERSION("1.0");
724MODULE_LICENSE("GPL");
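
The vendor hooks above are registered with the generic TPM core via tpm_register_hardware(), which exposes the usual TPM character device through the miscdev fops. Below is a minimal userspace sketch, assuming the node appears as /dev/tpm0 and using the TPM 1.2 TPM_GetRandom ordinal (0x46); both the path and the command choice are illustrative assumptions, not taken from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* TPM_TAG_RQU_COMMAND (0x00C1), paramSize 14, TPM_ORD_GetRandom
	 * (0x46), bytesRequested = 4 -- all fields big-endian. */
	unsigned char cmd[] = {
		0x00, 0xC1, 0x00, 0x00, 0x00, 0x0E,
		0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x04,
	};
	unsigned char resp[64];
	ssize_t n;
	int fd = open("/dev/tpm0", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
		return 1;
	n = read(fd, resp, sizeof(resp));	/* blocks in tpm_ibmvtpm_recv() */
	printf("got %zd response bytes\n", n);
	close(fd);
	return 0;
}
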
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
deleted file mode 100644
index bd82a791f99..00000000000
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * Copyright (C) 2012 IBM Corporation
3 *
4 * Author: Ashley Lai <adlai@us.ibm.com>
5 *
6 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
7 *
8 * Device driver for TCG/TCPA TPM (trusted platform module).
9 * Specifications at www.trustedcomputinggroup.org
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation, version 2 of the
14 * License.
15 *
16 */
17
18#ifndef __TPM_IBMVTPM_H__
19#define __TPM_IBMVTPM_H__
20
21/* vTPM Message Format 1 */
22struct ibmvtpm_crq {
23 u8 valid;
24 u8 msg;
25 u16 len;
26 u32 data;
27 u64 reserved;
28} __attribute__((packed, aligned(8)));
29
30struct ibmvtpm_crq_queue {
31 struct ibmvtpm_crq *crq_addr;
32 u32 index;
33 u32 num_entry;
34};
35
36struct ibmvtpm_dev {
37 struct device *dev;
38 struct vio_dev *vdev;
39 struct ibmvtpm_crq_queue crq_queue;
40 dma_addr_t crq_dma_handle;
41 u32 rtce_size;
 42 void *rtce_buf;
43 dma_addr_t rtce_dma_handle;
44 spinlock_t rtce_lock;
45 wait_queue_head_t wq;
46 u16 res_len;
47 u32 vtpm_version;
48};
49
50#define CRQ_RES_BUF_SIZE PAGE_SIZE
51
52/* Initialize CRQ */
53#define INIT_CRQ_CMD 0xC001000000000000LL /* Init cmd */
54#define INIT_CRQ_COMP_CMD 0xC002000000000000LL /* Init complete cmd */
 55#define INIT_CRQ_RES 0x01 /* Init response */
 56#define INIT_CRQ_COMP_RES 0x02 /* Init complete response */
57#define VALID_INIT_CRQ 0xC0 /* Valid command for init crq */
58
59/* vTPM CRQ response is the message type | 0x80 */
60#define VTPM_MSG_RES 0x80
61#define IBMVTPM_VALID_CMD 0x80
62
63/* vTPM CRQ message types */
64#define VTPM_GET_VERSION 0x01
65#define VTPM_GET_VERSION_RES (0x01 | VTPM_MSG_RES)
66
67#define VTPM_TPM_COMMAND 0x02
68#define VTPM_TPM_COMMAND_RES (0x02 | VTPM_MSG_RES)
69
70#define VTPM_GET_RTCE_BUFFER_SIZE 0x03
71#define VTPM_GET_RTCE_BUFFER_SIZE_RES (0x03 | VTPM_MSG_RES)
72
73#define VTPM_PREPARE_TO_SUSPEND 0x04
74#define VTPM_PREPARE_TO_SUSPEND_RES (0x04 | VTPM_MSG_RES)
75
76#endif
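
An illustrative sketch, not code from this tree, of how the 16-byte CRQ defined above is filled in for a TPM command and handed to the hypervisor as two 64-bit words, mirroring tpm_ibmvtpm_send() in the file removed above; rtce_dma and cmd_len stand in for the driver's rtce_dma_handle and the length of the blob already copied into the RTCE buffer.

/* Assumes the struct and message-type macros defined in this header. */
static int example_send_tpm_command(struct ibmvtpm_dev *ibmvtpm,
				    u16 cmd_len, u32 rtce_dma)
{
	struct ibmvtpm_crq crq;
	u64 *word = (u64 *)&crq;

	crq.valid = (u8)IBMVTPM_VALID_CMD;	/* 0x80: valid vTPM command  */
	crq.msg   = (u8)VTPM_TPM_COMMAND;	/* 0x02: run the TPM blob    */
	crq.len   = cmd_len;			/* bytes in the RTCE buffer  */
	crq.data  = rtce_dma;			/* guest address of the blob */

	/* The completion arrives later as VTPM_TPM_COMMAND_RES (0x82). */
	return ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
}
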
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 2b480c2960b..76da32e11f1 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -4,8 +4,8 @@
4 * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module 4 * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
5 * Specifications at www.trustedcomputinggroup.org 5 * Specifications at www.trustedcomputinggroup.org
6 * 6 *
7 * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net> 7 * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
8 * Sirrix AG - security technologies <tpmdd@sirrix.com> and 8 * Sirrix AG - security technologies, http://www.sirrix.com and
9 * Applied Data Security Group, Ruhr-University Bochum, Germany 9 * Applied Data Security Group, Ruhr-University Bochum, Germany
10 * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 10 * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
11 * 11 *
@@ -415,7 +415,7 @@ static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
415 415
416MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); 416MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
417 417
418static int tpm_inf_pnp_probe(struct pnp_dev *dev, 418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
419 const struct pnp_device_id *dev_id) 419 const struct pnp_device_id *dev_id)
420{ 420{
421 int rc = 0; 421 int rc = 0;
@@ -594,7 +594,7 @@ err_last:
594 return rc; 594 return rc;
595} 595}
596 596
597static void tpm_inf_pnp_remove(struct pnp_dev *dev) 597static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
598{ 598{
599 struct tpm_chip *chip = pnp_get_drvdata(dev); 599 struct tpm_chip *chip = pnp_get_drvdata(dev);
600 600
@@ -655,7 +655,7 @@ static struct pnp_driver tpm_inf_pnp_driver = {
655 .probe = tpm_inf_pnp_probe, 655 .probe = tpm_inf_pnp_probe,
656 .suspend = tpm_inf_pnp_suspend, 656 .suspend = tpm_inf_pnp_suspend,
657 .resume = tpm_inf_pnp_resume, 657 .resume = tpm_inf_pnp_resume,
658 .remove = tpm_inf_pnp_remove 658 .remove = __devexit_p(tpm_inf_pnp_remove)
659}; 659};
660 660
661static int __init init_inf(void) 661static int __init init_inf(void)
@@ -671,7 +671,7 @@ static void __exit cleanup_inf(void)
671module_init(init_inf); 671module_init(init_inf);
672module_exit(cleanup_inf); 672module_exit(cleanup_inf);
673 673
674MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>"); 674MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
675MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); 675MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
676MODULE_VERSION("1.9.2"); 676MODULE_VERSION("1.9.2");
677MODULE_LICENSE("GPL"); 677MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 640c9a427b5..4d2464871ad 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -274,13 +274,22 @@ static void tpm_nsc_remove(struct device *dev)
274 } 274 }
275} 275}
276 276
277static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); 277static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg)
278{
279 return tpm_pm_suspend(&dev->dev, msg);
280}
281
282static int tpm_nsc_resume(struct platform_device *dev)
283{
284 return tpm_pm_resume(&dev->dev);
285}
278 286
279static struct platform_driver nsc_drv = { 287static struct platform_driver nsc_drv = {
288 .suspend = tpm_nsc_suspend,
289 .resume = tpm_nsc_resume,
280 .driver = { 290 .driver = {
281 .name = "tpm_nsc", 291 .name = "tpm_nsc",
282 .owner = THIS_MODULE, 292 .owner = THIS_MODULE,
283 .pm = &tpm_nsc_pm,
284 }, 293 },
285}; 294};
286 295
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
deleted file mode 100644
index 98ba2bd1a35..00000000000
--- a/drivers/char/tpm/tpm_of.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Copyright 2012 IBM Corporation
3 *
4 * Author: Ashley Lai <adlai@us.ibm.com>
5 *
6 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
7 *
8 * Read the event log created by the firmware on PPC64
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/slab.h>
18#include <linux/of.h>
19
20#include "tpm.h"
21#include "tpm_eventlog.h"
22
23int read_log(struct tpm_bios_log *log)
24{
25 struct device_node *np;
26 const u32 *sizep;
27 const __be64 *basep;
28
29 if (log->bios_event_log != NULL) {
30 pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
31 return -EFAULT;
32 }
33
34 np = of_find_node_by_name(NULL, "ibm,vtpm");
35 if (!np) {
36 pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
37 return -ENODEV;
38 }
39
40 sizep = of_get_property(np, "linux,sml-size", NULL);
41 if (sizep == NULL) {
42 pr_err("%s: ERROR - SML size not found\n", __func__);
43 goto cleanup_eio;
44 }
45 if (*sizep == 0) {
46 pr_err("%s: ERROR - event log area empty\n", __func__);
47 goto cleanup_eio;
48 }
49
50 basep = of_get_property(np, "linux,sml-base", NULL);
51 if (basep == NULL) {
 52 pr_err("%s: ERROR - SML not found\n", __func__);
53 goto cleanup_eio;
54 }
55
56 of_node_put(np);
57 log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
58 if (!log->bios_event_log) {
59 pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
60 __func__);
61 return -ENOMEM;
62 }
63
64 log->bios_event_log_end = log->bios_event_log + *sizep;
65
66 memcpy(log->bios_event_log, __va(be64_to_cpup(basep)), *sizep);
67
68 return 0;
69
70cleanup_eio:
71 of_node_put(np);
72 return -EIO;
73}
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
deleted file mode 100644
index 720ebcf29fd..00000000000
--- a/drivers/char/tpm/tpm_ppi.c
+++ /dev/null
@@ -1,463 +0,0 @@
1#include <linux/acpi.h>
2#include <acpi/acpi_drivers.h>
3#include "tpm.h"
4
5static const u8 tpm_ppi_uuid[] = {
6 0xA6, 0xFA, 0xDD, 0x3D,
7 0x1B, 0x36,
8 0xB4, 0x4E,
9 0xA4, 0x24,
10 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
11};
12static char *tpm_device_name = "TPM";
13
14#define TPM_PPI_REVISION_ID 1
15#define TPM_PPI_FN_VERSION 1
16#define TPM_PPI_FN_SUBREQ 2
17#define TPM_PPI_FN_GETREQ 3
18#define TPM_PPI_FN_GETACT 4
19#define TPM_PPI_FN_GETRSP 5
20#define TPM_PPI_FN_SUBREQ2 7
21#define TPM_PPI_FN_GETOPR 8
22#define PPI_TPM_REQ_MAX 22
23#define PPI_VS_REQ_START 128
24#define PPI_VS_REQ_END 255
25#define PPI_VERSION_LEN 3
26
27static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
28 void **return_value)
29{
30 acpi_status status;
31 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
32 status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
33 if (strstr(buffer.pointer, context) != NULL) {
34 *return_value = handle;
35 kfree(buffer.pointer);
36 return AE_CTRL_TERMINATE;
37 }
38 return AE_OK;
39}
40
41static inline void ppi_assign_params(union acpi_object params[4],
42 u64 function_num)
43{
44 params[0].type = ACPI_TYPE_BUFFER;
45 params[0].buffer.length = sizeof(tpm_ppi_uuid);
46 params[0].buffer.pointer = (char *)tpm_ppi_uuid;
47 params[1].type = ACPI_TYPE_INTEGER;
48 params[1].integer.value = TPM_PPI_REVISION_ID;
49 params[2].type = ACPI_TYPE_INTEGER;
50 params[2].integer.value = function_num;
51 params[3].type = ACPI_TYPE_PACKAGE;
52 params[3].package.count = 0;
53 params[3].package.elements = NULL;
54}
55
56static ssize_t tpm_show_ppi_version(struct device *dev,
57 struct device_attribute *attr, char *buf)
58{
59 acpi_handle handle;
60 acpi_status status;
61 struct acpi_object_list input;
62 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
63 union acpi_object params[4];
64 union acpi_object *obj;
65
66 input.count = 4;
67 ppi_assign_params(params, TPM_PPI_FN_VERSION);
68 input.pointer = params;
69 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
70 ACPI_UINT32_MAX, ppi_callback, NULL,
71 tpm_device_name, &handle);
72 if (ACPI_FAILURE(status))
73 return -ENXIO;
74
75 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
76 ACPI_TYPE_STRING);
77 if (ACPI_FAILURE(status))
78 return -ENOMEM;
79 obj = (union acpi_object *)output.pointer;
80 status = scnprintf(buf, PAGE_SIZE, "%s\n", obj->string.pointer);
81 kfree(output.pointer);
82 return status;
83}
84
85static ssize_t tpm_show_ppi_request(struct device *dev,
86 struct device_attribute *attr, char *buf)
87{
88 acpi_handle handle;
89 acpi_status status;
90 struct acpi_object_list input;
91 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
92 union acpi_object params[4];
93 union acpi_object *ret_obj;
94
95 input.count = 4;
96 ppi_assign_params(params, TPM_PPI_FN_GETREQ);
97 input.pointer = params;
98 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
99 ACPI_UINT32_MAX, ppi_callback, NULL,
100 tpm_device_name, &handle);
101 if (ACPI_FAILURE(status))
102 return -ENXIO;
103
104 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
105 ACPI_TYPE_PACKAGE);
106 if (ACPI_FAILURE(status))
107 return -ENOMEM;
108 /*
109 * output.pointer should be of package type, including two integers.
110 * The first is function return code, 0 means success and 1 means
111 * error. The second is pending TPM operation requested by the OS, 0
112 * means none and >0 means operation value.
113 */
114 ret_obj = ((union acpi_object *)output.pointer)->package.elements;
115 if (ret_obj->type == ACPI_TYPE_INTEGER) {
116 if (ret_obj->integer.value) {
117 status = -EFAULT;
118 goto cleanup;
119 }
120 ret_obj++;
121 if (ret_obj->type == ACPI_TYPE_INTEGER)
122 status = scnprintf(buf, PAGE_SIZE, "%llu\n",
123 ret_obj->integer.value);
124 else
125 status = -EINVAL;
126 } else {
127 status = -EINVAL;
128 }
129cleanup:
130 kfree(output.pointer);
131 return status;
132}
133
134static ssize_t tpm_store_ppi_request(struct device *dev,
135 struct device_attribute *attr,
136 const char *buf, size_t count)
137{
138 char version[PPI_VERSION_LEN + 1];
139 acpi_handle handle;
140 acpi_status status;
141 struct acpi_object_list input;
142 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
143 union acpi_object params[4];
144 union acpi_object obj;
145 u32 req;
146 u64 ret;
147
148 input.count = 4;
149 ppi_assign_params(params, TPM_PPI_FN_VERSION);
150 input.pointer = params;
151 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
152 ACPI_UINT32_MAX, ppi_callback, NULL,
153 tpm_device_name, &handle);
154 if (ACPI_FAILURE(status))
155 return -ENXIO;
156
157 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
158 ACPI_TYPE_STRING);
159 if (ACPI_FAILURE(status))
160 return -ENOMEM;
161 strncpy(version,
162 ((union acpi_object *)output.pointer)->string.pointer,
163 PPI_VERSION_LEN);
164 kfree(output.pointer);
165 output.length = ACPI_ALLOCATE_BUFFER;
166 output.pointer = NULL;
167 /*
168 * the function to submit TPM operation request to pre-os environment
169 * is updated with function index from SUBREQ to SUBREQ2 since PPI
170 * version 1.1
171 */
172 if (strcmp(version, "1.1") == -1)
173 params[2].integer.value = TPM_PPI_FN_SUBREQ;
174 else
175 params[2].integer.value = TPM_PPI_FN_SUBREQ2;
176 /*
177 * PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. Some BIOS
178 * accept buffer/string/integer type, but some BIOS accept buffer/
179 * string/package type. For PPI version 1.0 and 1.1, use buffer type
180 * for compatibility, and use package type since 1.2 according to spec.
181 */
182 if (strcmp(version, "1.2") == -1) {
183 params[3].type = ACPI_TYPE_BUFFER;
184 params[3].buffer.length = sizeof(req);
185 sscanf(buf, "%d", &req);
186 params[3].buffer.pointer = (char *)&req;
187 } else {
188 params[3].package.count = 1;
189 obj.type = ACPI_TYPE_INTEGER;
190 sscanf(buf, "%llu", &obj.integer.value);
191 params[3].package.elements = &obj;
192 }
193
194 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
195 ACPI_TYPE_INTEGER);
196 if (ACPI_FAILURE(status))
197 return -ENOMEM;
198 ret = ((union acpi_object *)output.pointer)->integer.value;
199 if (ret == 0)
200 status = (acpi_status)count;
201 else if (ret == 1)
202 status = -EPERM;
203 else
204 status = -EFAULT;
205 kfree(output.pointer);
206 return status;
207}
208
209static ssize_t tpm_show_ppi_transition_action(struct device *dev,
210 struct device_attribute *attr,
211 char *buf)
212{
213 char version[PPI_VERSION_LEN + 1];
214 acpi_handle handle;
215 acpi_status status;
216 struct acpi_object_list input;
217 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
218 union acpi_object params[4];
219 u32 ret;
220 char *info[] = {
221 "None",
222 "Shutdown",
223 "Reboot",
224 "OS Vendor-specific",
225 "Error",
226 };
227 input.count = 4;
228 ppi_assign_params(params, TPM_PPI_FN_VERSION);
229 input.pointer = params;
230 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
231 ACPI_UINT32_MAX, ppi_callback, NULL,
232 tpm_device_name, &handle);
233 if (ACPI_FAILURE(status))
234 return -ENXIO;
235
236 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
237 ACPI_TYPE_STRING);
238 if (ACPI_FAILURE(status))
239 return -ENOMEM;
240 strncpy(version,
241 ((union acpi_object *)output.pointer)->string.pointer,
242 PPI_VERSION_LEN);
243 /*
244 * PPI spec defines params[3].type as empty package, but some platforms
245 * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
246 * compatibility, define params[3].type as buffer, if PPI version < 1.2
247 */
248 if (strcmp(version, "1.2") == -1) {
249 params[3].type = ACPI_TYPE_BUFFER;
250 params[3].buffer.length = 0;
251 params[3].buffer.pointer = NULL;
252 }
253 params[2].integer.value = TPM_PPI_FN_GETACT;
254 kfree(output.pointer);
255 output.length = ACPI_ALLOCATE_BUFFER;
256 output.pointer = NULL;
257 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
258 ACPI_TYPE_INTEGER);
259 if (ACPI_FAILURE(status))
260 return -ENOMEM;
261 ret = ((union acpi_object *)output.pointer)->integer.value;
262 if (ret < ARRAY_SIZE(info) - 1)
263 status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
264 else
265 status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
266 info[ARRAY_SIZE(info)-1]);
267 kfree(output.pointer);
268 return status;
269}
270
271static ssize_t tpm_show_ppi_response(struct device *dev,
272 struct device_attribute *attr,
273 char *buf)
274{
275 acpi_handle handle;
276 acpi_status status;
277 struct acpi_object_list input;
278 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
279 union acpi_object params[4];
280 union acpi_object *ret_obj;
281 u64 req;
282
283 input.count = 4;
284 ppi_assign_params(params, TPM_PPI_FN_GETRSP);
285 input.pointer = params;
286 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
287 ACPI_UINT32_MAX, ppi_callback, NULL,
288 tpm_device_name, &handle);
289 if (ACPI_FAILURE(status))
290 return -ENXIO;
291
292 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
293 ACPI_TYPE_PACKAGE);
294 if (ACPI_FAILURE(status))
295 return -ENOMEM;
296 /*
297 * parameter output.pointer should be of package type, including
298 * 3 integers. The first means function return code, the second means
299 * most recent TPM operation request, and the last means response to
300 * the most recent TPM operation request. Only if the first is 0, and
301 * the second integer is not 0, the response makes sense.
302 */
303 ret_obj = ((union acpi_object *)output.pointer)->package.elements;
304 if (ret_obj->type != ACPI_TYPE_INTEGER) {
305 status = -EINVAL;
306 goto cleanup;
307 }
308 if (ret_obj->integer.value) {
309 status = -EFAULT;
310 goto cleanup;
311 }
312 ret_obj++;
313 if (ret_obj->type != ACPI_TYPE_INTEGER) {
314 status = -EINVAL;
315 goto cleanup;
316 }
317 if (ret_obj->integer.value) {
318 req = ret_obj->integer.value;
319 ret_obj++;
320 if (ret_obj->type != ACPI_TYPE_INTEGER) {
321 status = -EINVAL;
322 goto cleanup;
323 }
324 if (ret_obj->integer.value == 0)
325 status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
326 "0: Success");
327 else if (ret_obj->integer.value == 0xFFFFFFF0)
328 status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
329 "0xFFFFFFF0: User Abort");
330 else if (ret_obj->integer.value == 0xFFFFFFF1)
331 status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
332 "0xFFFFFFF1: BIOS Failure");
333 else if (ret_obj->integer.value >= 1 &&
334 ret_obj->integer.value <= 0x00000FFF)
335 status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
336 req, ret_obj->integer.value,
337 "Corresponding TPM error");
338 else
339 status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
340 req, ret_obj->integer.value,
341 "Error");
342 } else {
343 status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
344 ret_obj->integer.value, "No Recent Request");
345 }
346cleanup:
347 kfree(output.pointer);
348 return status;
349}
350
351static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
352{
353 char *str = buf;
354 char version[PPI_VERSION_LEN];
355 acpi_handle handle;
356 acpi_status status;
357 struct acpi_object_list input;
358 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
359 union acpi_object params[4];
360 union acpi_object obj;
361 int i;
362 u32 ret;
363 char *info[] = {
364 "Not implemented",
365 "BIOS only",
366 "Blocked for OS by BIOS",
367 "User required",
368 "User not required",
369 };
370 input.count = 4;
371 ppi_assign_params(params, TPM_PPI_FN_VERSION);
372 input.pointer = params;
373 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
374 ACPI_UINT32_MAX, ppi_callback, NULL,
375 tpm_device_name, &handle);
376 if (ACPI_FAILURE(status))
377 return -ENXIO;
378
379 status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
380 ACPI_TYPE_STRING);
381 if (ACPI_FAILURE(status))
382 return -ENOMEM;
383
384 strncpy(version,
385 ((union acpi_object *)output.pointer)->string.pointer,
386 PPI_VERSION_LEN);
387 kfree(output.pointer);
388 output.length = ACPI_ALLOCATE_BUFFER;
389 output.pointer = NULL;
390 if (strcmp(version, "1.2") == -1)
391 return -EPERM;
392
393 params[2].integer.value = TPM_PPI_FN_GETOPR;
394 params[3].package.count = 1;
395 obj.type = ACPI_TYPE_INTEGER;
396 params[3].package.elements = &obj;
397 for (i = start; i <= end; i++) {
398 obj.integer.value = i;
399 status = acpi_evaluate_object_typed(handle, "_DSM",
400 &input, &output, ACPI_TYPE_INTEGER);
401 if (ACPI_FAILURE(status))
402 return -ENOMEM;
403
404 ret = ((union acpi_object *)output.pointer)->integer.value;
405 if (ret > 0 && ret < ARRAY_SIZE(info))
406 str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
407 i, ret, info[ret]);
408 kfree(output.pointer);
409 output.length = ACPI_ALLOCATE_BUFFER;
410 output.pointer = NULL;
411 }
412 return str - buf;
413}
414
415static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
416 struct device_attribute *attr,
417 char *buf)
418{
419 return show_ppi_operations(buf, 0, PPI_TPM_REQ_MAX);
420}
421
422static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
423 struct device_attribute *attr,
424 char *buf)
425{
426 return show_ppi_operations(buf, PPI_VS_REQ_START, PPI_VS_REQ_END);
427}
428
429static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
430static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP,
431 tpm_show_ppi_request, tpm_store_ppi_request);
432static DEVICE_ATTR(transition_action, S_IRUGO,
433 tpm_show_ppi_transition_action, NULL);
434static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL);
435static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL);
436static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL);
437
438static struct attribute *ppi_attrs[] = {
439 &dev_attr_version.attr,
440 &dev_attr_request.attr,
441 &dev_attr_transition_action.attr,
442 &dev_attr_response.attr,
443 &dev_attr_tcg_operations.attr,
444 &dev_attr_vs_operations.attr, NULL,
445};
446static struct attribute_group ppi_attr_grp = {
447 .name = "ppi",
448 .attrs = ppi_attrs
449};
450
451int tpm_add_ppi(struct kobject *parent)
452{
453 return sysfs_create_group(parent, &ppi_attr_grp);
454}
455EXPORT_SYMBOL_GPL(tpm_add_ppi);
456
457void tpm_remove_ppi(struct kobject *parent)
458{
459 sysfs_remove_group(parent, &ppi_attr_grp);
460}
461EXPORT_SYMBOL_GPL(tpm_remove_ppi);
462
463MODULE_LICENSE("GPL");
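
tpm_add_ppi() above attaches a "ppi" sysfs group to the TPM device, so a physical-presence request is submitted by writing an operation number to its request attribute and is acted on by the pre-OS environment at the next reboot. A hypothetical userspace sketch follows; both the sysfs path and operation code 5 (Clear in the TCG PPI operation table) are assumptions for illustration, not values from this patch.

#include <stdio.h>

int main(void)
{
	/* Path and operation number are assumptions -- adjust to the system. */
	FILE *f = fopen("/sys/class/misc/tpm0/device/ppi/request", "w");

	if (!f) {
		perror("open ppi/request");
		return 1;
	}
	fprintf(f, "5\n");	/* queue PPI operation 5 for firmware */
	fclose(f);
	return 0;
}
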
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index ea31dafbcac..3f4051a7c5a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -29,6 +29,8 @@
29#include <linux/freezer.h> 29#include <linux/freezer.h>
30#include "tpm.h" 30#include "tpm.h"
31 31
32#define TPM_HEADER_SIZE 10
33
32enum tis_access { 34enum tis_access {
33 TPM_ACCESS_VALID = 0x80, 35 TPM_ACCESS_VALID = 0x80,
34 TPM_ACCESS_ACTIVE_LOCALITY = 0x20, 36 TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
@@ -76,7 +78,7 @@ enum tis_defaults {
76#define TPM_RID(l) (0x0F04 | ((l) << 12)) 78#define TPM_RID(l) (0x0F04 | ((l) << 12))
77 79
78static LIST_HEAD(tis_chips); 80static LIST_HEAD(tis_chips);
79static DEFINE_MUTEX(tis_lock); 81static DEFINE_SPINLOCK(tis_lock);
80 82
81#if defined(CONFIG_PNP) && defined(CONFIG_ACPI) 83#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
82static int is_itpm(struct pnp_dev *dev) 84static int is_itpm(struct pnp_dev *dev)
@@ -191,14 +193,54 @@ static int get_burstcount(struct tpm_chip *chip)
191 return -EBUSY; 193 return -EBUSY;
192} 194}
193 195
196static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
197 wait_queue_head_t *queue)
198{
199 unsigned long stop;
200 long rc;
201 u8 status;
202
203 /* check current status */
204 status = tpm_tis_status(chip);
205 if ((status & mask) == mask)
206 return 0;
207
208 stop = jiffies + timeout;
209
210 if (chip->vendor.irq) {
211again:
212 timeout = stop - jiffies;
213 if ((long)timeout <= 0)
214 return -ETIME;
215 rc = wait_event_interruptible_timeout(*queue,
216 ((tpm_tis_status
217 (chip) & mask) ==
218 mask), timeout);
219 if (rc > 0)
220 return 0;
221 if (rc == -ERESTARTSYS && freezing(current)) {
222 clear_thread_flag(TIF_SIGPENDING);
223 goto again;
224 }
225 } else {
226 do {
227 msleep(TPM_TIMEOUT);
228 status = tpm_tis_status(chip);
229 if ((status & mask) == mask)
230 return 0;
231 } while (time_before(jiffies, stop));
232 }
233 return -ETIME;
234}
235
194static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) 236static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
195{ 237{
196 int size = 0, burstcnt; 238 int size = 0, burstcnt;
197 while (size < count && 239 while (size < count &&
198 wait_for_tpm_stat(chip, 240 wait_for_stat(chip,
199 TPM_STS_DATA_AVAIL | TPM_STS_VALID, 241 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
200 chip->vendor.timeout_c, 242 chip->vendor.timeout_c,
201 &chip->vendor.read_queue) 243 &chip->vendor.read_queue)
202 == 0) { 244 == 0) {
203 burstcnt = get_burstcount(chip); 245 burstcnt = get_burstcount(chip);
204 for (; burstcnt > 0 && size < count; burstcnt--) 246 for (; burstcnt > 0 && size < count; burstcnt--)
@@ -240,8 +282,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
240 goto out; 282 goto out;
241 } 283 }
242 284
243 wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, 285 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
244 &chip->vendor.int_queue); 286 &chip->vendor.int_queue);
245 status = tpm_tis_status(chip); 287 status = tpm_tis_status(chip);
246 if (status & TPM_STS_DATA_AVAIL) { /* retry? */ 288 if (status & TPM_STS_DATA_AVAIL) { /* retry? */
247 dev_err(chip->dev, "Error left over data\n"); 289 dev_err(chip->dev, "Error left over data\n");
@@ -255,7 +297,7 @@ out:
255 return size; 297 return size;
256} 298}
257 299
258static bool itpm; 300static int itpm;
259module_param(itpm, bool, 0444); 301module_param(itpm, bool, 0444);
260MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); 302MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
261 303
@@ -275,7 +317,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
275 status = tpm_tis_status(chip); 317 status = tpm_tis_status(chip);
276 if ((status & TPM_STS_COMMAND_READY) == 0) { 318 if ((status & TPM_STS_COMMAND_READY) == 0) {
277 tpm_tis_ready(chip); 319 tpm_tis_ready(chip);
278 if (wait_for_tpm_stat 320 if (wait_for_stat
279 (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, 321 (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
280 &chip->vendor.int_queue) < 0) { 322 &chip->vendor.int_queue) < 0) {
281 rc = -ETIME; 323 rc = -ETIME;
@@ -291,8 +333,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
291 count++; 333 count++;
292 } 334 }
293 335
294 wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, 336 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
295 &chip->vendor.int_queue); 337 &chip->vendor.int_queue);
296 status = tpm_tis_status(chip); 338 status = tpm_tis_status(chip);
297 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { 339 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
298 rc = -EIO; 340 rc = -EIO;
@@ -303,8 +345,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
303 /* write last byte */ 345 /* write last byte */
304 iowrite8(buf[count], 346 iowrite8(buf[count],
305 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); 347 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
306 wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, 348 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
307 &chip->vendor.int_queue); 349 &chip->vendor.int_queue);
308 status = tpm_tis_status(chip); 350 status = tpm_tis_status(chip);
309 if ((status & TPM_STS_DATA_EXPECT) != 0) { 351 if ((status & TPM_STS_DATA_EXPECT) != 0) {
310 rc = -EIO; 352 rc = -EIO;
@@ -339,7 +381,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
339 381
340 if (chip->vendor.irq) { 382 if (chip->vendor.irq) {
341 ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); 383 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
342 if (wait_for_tpm_stat 384 if (wait_for_stat
343 (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, 385 (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
344 tpm_calc_ordinal_duration(chip, ordinal), 386 tpm_calc_ordinal_duration(chip, ordinal),
345 &chip->vendor.read_queue) < 0) { 387 &chip->vendor.read_queue) < 0) {
@@ -367,12 +409,7 @@ static int probe_itpm(struct tpm_chip *chip)
367 0x00, 0x00, 0x00, 0xf1 409 0x00, 0x00, 0x00, 0xf1
368 }; 410 };
369 size_t len = sizeof(cmd_getticks); 411 size_t len = sizeof(cmd_getticks);
370 bool rem_itpm = itpm; 412 int rem_itpm = itpm;
371 u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));
372
373 /* probe only iTPMS */
374 if (vendor != TPM_VID_INTEL)
375 return 0;
376 413
377 itpm = 0; 414 itpm = 0;
378 415
@@ -502,7 +539,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
502 return IRQ_HANDLED; 539 return IRQ_HANDLED;
503} 540}
504 541
505static bool interrupts = 1; 542static int interrupts = 1;
506module_param(interrupts, bool, 0444); 543module_param(interrupts, bool, 0444);
507MODULE_PARM_DESC(interrupts, "Enable interrupts"); 544MODULE_PARM_DESC(interrupts, "Enable interrupts");
508 545
@@ -510,7 +547,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
510 resource_size_t len, unsigned int irq) 547 resource_size_t len, unsigned int irq)
511{ 548{
512 u32 vendor, intfcaps, intmask; 549 u32 vendor, intfcaps, intmask;
513 int rc, i, irq_s, irq_e, probe; 550 int rc, i, irq_s, irq_e;
514 struct tpm_chip *chip; 551 struct tpm_chip *chip;
515 552
516 if (!(chip = tpm_register_hardware(dev, &tpm_tis))) 553 if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -540,12 +577,11 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
540 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 577 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
541 578
542 if (!itpm) { 579 if (!itpm) {
543 probe = probe_itpm(chip); 580 itpm = probe_itpm(chip);
544 if (probe < 0) { 581 if (itpm < 0) {
545 rc = -ENODEV; 582 rc = -ENODEV;
546 goto out_err; 583 goto out_err;
547 } 584 }
548 itpm = (probe == 0) ? 0 : 1;
549 } 585 }
550 586
551 if (itpm) 587 if (itpm)
@@ -578,17 +614,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
578 dev_dbg(dev, "\tData Avail Int Support\n"); 614 dev_dbg(dev, "\tData Avail Int Support\n");
579 615
580 /* get the timeouts before testing for irqs */ 616 /* get the timeouts before testing for irqs */
581 if (tpm_get_timeouts(chip)) { 617 tpm_get_timeouts(chip);
582 dev_err(dev, "Could not get TPM timeouts and durations\n");
583 rc = -ENODEV;
584 goto out_err;
585 }
586
587 if (tpm_do_selftest(chip)) {
588 dev_err(dev, "TPM self test failed\n");
589 rc = -ENODEV;
590 goto out_err;
591 }
592 618
593 /* INTERRUPT Setup */ 619 /* INTERRUPT Setup */
594 init_waitqueue_head(&chip->vendor.read_queue); 620 init_waitqueue_head(&chip->vendor.read_queue);
@@ -692,10 +718,11 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
692 } 718 }
693 719
694 INIT_LIST_HEAD(&chip->vendor.list); 720 INIT_LIST_HEAD(&chip->vendor.list);
695 mutex_lock(&tis_lock); 721 spin_lock(&tis_lock);
696 list_add(&chip->vendor.list, &tis_chips); 722 list_add(&chip->vendor.list, &tis_chips);
697 mutex_unlock(&tis_lock); 723 spin_unlock(&tis_lock);
698 724
725 tpm_continue_selftest(chip);
699 726
700 return 0; 727 return 0;
701out_err: 728out_err:
@@ -705,7 +732,6 @@ out_err:
705 return rc; 732 return rc;
706} 733}
707 734
708#if defined(CONFIG_PNP) || defined(CONFIG_PM_SLEEP)
709static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) 735static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
710{ 736{
711 u32 intmask; 737 u32 intmask;
@@ -726,10 +752,10 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
726 iowrite32(intmask, 752 iowrite32(intmask,
727 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); 753 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
728} 754}
729#endif 755
730 756
731#ifdef CONFIG_PNP 757#ifdef CONFIG_PNP
732static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, 758static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
733 const struct pnp_device_id *pnp_id) 759 const struct pnp_device_id *pnp_id)
734{ 760{
735 resource_size_t start, len; 761 resource_size_t start, len;
@@ -751,7 +777,7 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
751 777
752static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg) 778static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
753{ 779{
754 return tpm_pm_suspend(&dev->dev); 780 return tpm_pm_suspend(&dev->dev, msg);
755} 781}
756 782
757static int tpm_tis_pnp_resume(struct pnp_dev *dev) 783static int tpm_tis_pnp_resume(struct pnp_dev *dev)
@@ -764,12 +790,12 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
764 790
765 ret = tpm_pm_resume(&dev->dev); 791 ret = tpm_pm_resume(&dev->dev);
766 if (!ret) 792 if (!ret)
767 tpm_do_selftest(chip); 793 tpm_continue_selftest(chip);
768 794
769 return ret; 795 return ret;
770} 796}
771 797
772static struct pnp_device_id tpm_pnp_tbl[] = { 798static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
773 {"PNP0C31", 0}, /* TPM */ 799 {"PNP0C31", 0}, /* TPM */
774 {"ATM1200", 0}, /* Atmel */ 800 {"ATM1200", 0}, /* Atmel */
775 {"IFX0102", 0}, /* Infineon */ 801 {"IFX0102", 0}, /* Infineon */
@@ -783,7 +809,7 @@ static struct pnp_device_id tpm_pnp_tbl[] = {
783}; 809};
784MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); 810MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
785 811
786static void tpm_tis_pnp_remove(struct pnp_dev *dev) 812static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
787{ 813{
788 struct tpm_chip *chip = pnp_get_drvdata(dev); 814 struct tpm_chip *chip = pnp_get_drvdata(dev);
789 815
@@ -807,32 +833,32 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
807 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); 833 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
808MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); 834MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
809#endif 835#endif
836static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
837{
838 return tpm_pm_suspend(&dev->dev, msg);
839}
810 840
811#ifdef CONFIG_PM_SLEEP 841static int tpm_tis_resume(struct platform_device *dev)
812static int tpm_tis_resume(struct device *dev)
813{ 842{
814 struct tpm_chip *chip = dev_get_drvdata(dev); 843 struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
815 844
816 if (chip->vendor.irq) 845 if (chip->vendor.irq)
817 tpm_tis_reenable_interrupts(chip); 846 tpm_tis_reenable_interrupts(chip);
818 847
819 return tpm_pm_resume(dev); 848 return tpm_pm_resume(&dev->dev);
820} 849}
821#endif
822
823static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
824
825static struct platform_driver tis_drv = { 850static struct platform_driver tis_drv = {
826 .driver = { 851 .driver = {
827 .name = "tpm_tis", 852 .name = "tpm_tis",
828 .owner = THIS_MODULE, 853 .owner = THIS_MODULE,
829 .pm = &tpm_tis_pm,
830 }, 854 },
855 .suspend = tpm_tis_suspend,
856 .resume = tpm_tis_resume,
831}; 857};
832 858
833static struct platform_device *pdev; 859static struct platform_device *pdev;
834 860
835static bool force; 861static int force;
836module_param(force, bool, 0444); 862module_param(force, bool, 0444);
837MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); 863MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
838static int __init init_tis(void) 864static int __init init_tis(void)
@@ -859,7 +885,7 @@ static void __exit cleanup_tis(void)
859{ 885{
860 struct tpm_vendor_specific *i, *j; 886 struct tpm_vendor_specific *i, *j;
861 struct tpm_chip *chip; 887 struct tpm_chip *chip;
862 mutex_lock(&tis_lock); 888 spin_lock(&tis_lock);
863 list_for_each_entry_safe(i, j, &tis_chips, list) { 889 list_for_each_entry_safe(i, j, &tis_chips, list) {
864 chip = to_tpm_chip(i); 890 chip = to_tpm_chip(i);
865 tpm_remove_hardware(chip->dev); 891 tpm_remove_hardware(chip->dev);
@@ -875,7 +901,7 @@ static void __exit cleanup_tis(void)
875 iounmap(i->iobase); 901 iounmap(i->iobase);
876 list_del(&i->list); 902 list_del(&i->list);
877 } 903 }
878 mutex_unlock(&tis_lock); 904 spin_unlock(&tis_lock);
879#ifdef CONFIG_PNP 905#ifdef CONFIG_PNP
880 if (!force) { 906 if (!force) {
881 pnp_unregister_driver(&tis_pnp_driver); 907 pnp_unregister_driver(&tis_pnp_driver);
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4945bd3d18d..a1f68af4ccf 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -17,7 +17,6 @@
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/serial.h> 18#include <linux/serial.h>
19#include <linux/tty.h> 19#include <linux/tty.h>
20#include <linux/export.h>
21 20
22struct ttyprintk_port { 21struct ttyprintk_port {
23 struct tty_port port; 22 struct tty_port port;
@@ -67,7 +66,7 @@ static int tpk_printk(const unsigned char *buf, int count)
67 tmp[tpk_curr + 1] = '\0'; 66 tmp[tpk_curr + 1] = '\0';
68 printk(KERN_INFO "%s%s", tpk_tag, tmp); 67 printk(KERN_INFO "%s%s", tpk_tag, tmp);
69 tpk_curr = 0; 68 tpk_curr = 0;
70 if ((i + 1) < count && buf[i + 1] == '\n') 69 if (buf[i + 1] == '\n')
71 i++; 70 i++;
72 break; 71 break;
73 case '\n': 72 case '\n':
@@ -171,35 +170,31 @@ static const struct tty_operations ttyprintk_ops = {
171 .ioctl = tpk_ioctl, 170 .ioctl = tpk_ioctl,
172}; 171};
173 172
174static struct tty_port_operations null_ops = { }; 173struct tty_port_operations null_ops = { };
175 174
176static struct tty_driver *ttyprintk_driver; 175static struct tty_driver *ttyprintk_driver;
177 176
178static int __init ttyprintk_init(void) 177static int __init ttyprintk_init(void)
179{ 178{
180 int ret = -ENOMEM; 179 int ret = -ENOMEM;
180 void *rp;
181 181
182 tpk_port.port.ops = &null_ops; 182 ttyprintk_driver = alloc_tty_driver(1);
183 mutex_init(&tpk_port.port_write_mutex); 183 if (!ttyprintk_driver)
184 184 return ret;
185 ttyprintk_driver = tty_alloc_driver(1,
186 TTY_DRIVER_RESET_TERMIOS |
187 TTY_DRIVER_REAL_RAW |
188 TTY_DRIVER_UNNUMBERED_NODE);
189 if (IS_ERR(ttyprintk_driver))
190 return PTR_ERR(ttyprintk_driver);
191
192 tty_port_init(&tpk_port.port);
193 185
186 ttyprintk_driver->owner = THIS_MODULE;
194 ttyprintk_driver->driver_name = "ttyprintk"; 187 ttyprintk_driver->driver_name = "ttyprintk";
195 ttyprintk_driver->name = "ttyprintk"; 188 ttyprintk_driver->name = "ttyprintk";
196 ttyprintk_driver->major = TTYAUX_MAJOR; 189 ttyprintk_driver->major = TTYAUX_MAJOR;
197 ttyprintk_driver->minor_start = 3; 190 ttyprintk_driver->minor_start = 3;
191 ttyprintk_driver->num = 1;
198 ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE; 192 ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE;
199 ttyprintk_driver->init_termios = tty_std_termios; 193 ttyprintk_driver->init_termios = tty_std_termios;
200 ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET; 194 ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
195 ttyprintk_driver->flags = TTY_DRIVER_RESET_TERMIOS |
196 TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
201 tty_set_operations(ttyprintk_driver, &ttyprintk_ops); 197 tty_set_operations(ttyprintk_driver, &ttyprintk_ops);
202 tty_port_link_device(&tpk_port.port, ttyprintk_driver, 0);
203 198
204 ret = tty_register_driver(ttyprintk_driver); 199 ret = tty_register_driver(ttyprintk_driver);
205 if (ret < 0) { 200 if (ret < 0) {
@@ -207,12 +202,23 @@ static int __init ttyprintk_init(void)
207 goto error; 202 goto error;
208 } 203 }
209 204
205 /* create our unnumbered device */
206 rp = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 3), NULL,
207 ttyprintk_driver->name);
208 if (IS_ERR(rp)) {
209 printk(KERN_ERR "Couldn't create ttyprintk device\n");
210 ret = PTR_ERR(rp);
211 goto error;
212 }
213
214 tty_port_init(&tpk_port.port);
215 tpk_port.port.ops = &null_ops;
216 mutex_init(&tpk_port.port_write_mutex);
217
210 return 0; 218 return 0;
211 219
212error: 220error:
213 tty_unregister_driver(ttyprintk_driver);
214 put_tty_driver(ttyprintk_driver); 221 put_tty_driver(ttyprintk_driver);
215 tty_port_destroy(&tpk_port.port);
216 ttyprintk_driver = NULL; 222 ttyprintk_driver = NULL;
217 return ret; 223 return ret;
218} 224}
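
Note on the ttyprintk hunks: init goes back from the newer tty_alloc_driver()/tty_port_link_device() flow to the older alloc_tty_driver() + device_create() sequence, with the driver fields filled in by hand. The fragment below is a hedged sketch of that older registration sequence only; the driver name, ops table, major/type choices and the trimmed error handling are illustrative, not ttyprintk's.

#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static const struct tty_operations example_ops = {
	/* .open/.close/.write callbacks would go here */
};

static struct tty_driver *example_driver;

static int __init example_tty_init(void)
{
	int ret;

	example_driver = alloc_tty_driver(1);	/* returns NULL on failure */
	if (!example_driver)
		return -ENOMEM;

	example_driver->owner = THIS_MODULE;
	example_driver->driver_name = "example";
	example_driver->name = "example";
	example_driver->major = 0;		/* dynamic major, unlike ttyprintk */
	example_driver->num = 1;
	example_driver->type = TTY_DRIVER_TYPE_SERIAL;
	example_driver->init_termios = tty_std_termios;
	example_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(example_driver, &example_ops);

	ret = tty_register_driver(example_driver);
	if (ret) {
		put_tty_driver(example_driver);
		return ret;
	}
	return 0;
}
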
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 684b0d53764..fb68b129537 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -19,13 +19,9 @@
19 */ 19 */
20#include <linux/cdev.h> 20#include <linux/cdev.h>
21#include <linux/debugfs.h> 21#include <linux/debugfs.h>
22#include <linux/completion.h>
23#include <linux/device.h> 22#include <linux/device.h>
24#include <linux/err.h> 23#include <linux/err.h>
25#include <linux/freezer.h>
26#include <linux/fs.h> 24#include <linux/fs.h>
27#include <linux/splice.h>
28#include <linux/pagemap.h>
29#include <linux/init.h> 25#include <linux/init.h>
30#include <linux/list.h> 26#include <linux/list.h>
31#include <linux/poll.h> 27#include <linux/poll.h>
@@ -36,13 +32,8 @@
36#include <linux/virtio_console.h> 32#include <linux/virtio_console.h>
37#include <linux/wait.h> 33#include <linux/wait.h>
38#include <linux/workqueue.h> 34#include <linux/workqueue.h>
39#include <linux/module.h>
40#include <linux/dma-mapping.h>
41#include <linux/kconfig.h>
42#include "../tty/hvc/hvc_console.h" 35#include "../tty/hvc/hvc_console.h"
43 36
44#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
45
46/* 37/*
47 * This is a global struct for storing common data for all the devices 38 * This is a global struct for storing common data for all the devices
48 * this driver handles. 39 * this driver handles.
@@ -82,7 +73,6 @@ struct ports_driver_data {
82static struct ports_driver_data pdrvdata; 73static struct ports_driver_data pdrvdata;
83 74
84DEFINE_SPINLOCK(pdrvdata_lock); 75DEFINE_SPINLOCK(pdrvdata_lock);
85DECLARE_COMPLETION(early_console_added);
86 76
87/* This struct holds information that's relevant only for console ports */ 77/* This struct holds information that's relevant only for console ports */
88struct console { 78struct console {
@@ -115,21 +105,6 @@ struct port_buffer {
115 size_t len; 105 size_t len;
116 /* offset in the buf from which to consume data */ 106 /* offset in the buf from which to consume data */
117 size_t offset; 107 size_t offset;
118
119 /* DMA address of buffer */
120 dma_addr_t dma;
121
122 /* Device we got DMA memory from */
123 struct device *dev;
124
125 /* List of pending dma buffers to free */
126 struct list_head list;
127
128 /* If sgpages == 0 then buf is used */
129 unsigned int sgpages;
130
131 /* sg is used if spages > 0. sg must be the last in is struct */
132 struct scatterlist sg[0];
133}; 108};
134 109
135/* 110/*
@@ -176,10 +151,6 @@ struct ports_device {
176 int chr_major; 151 int chr_major;
177}; 152};
178 153
179struct port_stats {
180 unsigned long bytes_sent, bytes_received, bytes_discarded;
181};
182
183/* This struct holds the per-port data */ 154/* This struct holds the per-port data */
184struct port { 155struct port {
185 /* Next port in the list, head is in the ports_device */ 156 /* Next port in the list, head is in the ports_device */
@@ -208,13 +179,6 @@ struct port {
208 struct dentry *debugfs_file; 179 struct dentry *debugfs_file;
209 180
210 /* 181 /*
211 * Keep count of the bytes sent, received and discarded for
212 * this port for accounting and debugging purposes. These
213 * counts are not reset across port open / close events.
214 */
215 struct port_stats stats;
216
217 /*
218 * The entries in this struct will be valid if this port is 182 * The entries in this struct will be valid if this port is
219 * hooked up to an hvc console 183 * hooked up to an hvc console
220 */ 184 */
@@ -344,11 +308,6 @@ static bool is_console_port(struct port *port)
344 return false; 308 return false;
345} 309}
346 310
347static bool is_rproc_serial(const struct virtio_device *vdev)
348{
349 return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
350}
351
352static inline bool use_multiport(struct ports_device *portdev) 311static inline bool use_multiport(struct ports_device *portdev)
353{ 312{
354 /* 313 /*
@@ -360,110 +319,20 @@ static inline bool use_multiport(struct ports_device *portdev)
360 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); 319 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
361} 320}
362 321
363static DEFINE_SPINLOCK(dma_bufs_lock); 322static void free_buf(struct port_buffer *buf)
364static LIST_HEAD(pending_free_dma_bufs);
365
366static void free_buf(struct port_buffer *buf, bool can_sleep)
367{ 323{
368 unsigned int i; 324 kfree(buf->buf);
369
370 for (i = 0; i < buf->sgpages; i++) {
371 struct page *page = sg_page(&buf->sg[i]);
372 if (!page)
373 break;
374 put_page(page);
375 }
376
377 if (!buf->dev) {
378 kfree(buf->buf);
379 } else if (is_rproc_enabled) {
380 unsigned long flags;
381
382 /* dma_free_coherent requires interrupts to be enabled. */
383 if (!can_sleep) {
384 /* queue up dma-buffers to be freed later */
385 spin_lock_irqsave(&dma_bufs_lock, flags);
386 list_add_tail(&buf->list, &pending_free_dma_bufs);
387 spin_unlock_irqrestore(&dma_bufs_lock, flags);
388 return;
389 }
390 dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
391
392 /* Release device refcnt and allow it to be freed */
393 put_device(buf->dev);
394 }
395
396 kfree(buf); 325 kfree(buf);
397} 326}
398 327
399static void reclaim_dma_bufs(void) 328static struct port_buffer *alloc_buf(size_t buf_size)
400{
401 unsigned long flags;
402 struct port_buffer *buf, *tmp;
403 LIST_HEAD(tmp_list);
404
405 if (list_empty(&pending_free_dma_bufs))
406 return;
407
408 /* Create a copy of the pending_free_dma_bufs while holding the lock */
409 spin_lock_irqsave(&dma_bufs_lock, flags);
410 list_cut_position(&tmp_list, &pending_free_dma_bufs,
411 pending_free_dma_bufs.prev);
412 spin_unlock_irqrestore(&dma_bufs_lock, flags);
413
414 /* Release the dma buffers, without irqs enabled */
415 list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
416 list_del(&buf->list);
417 free_buf(buf, true);
418 }
419}
420
421static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
422 int pages)
423{ 329{
424 struct port_buffer *buf; 330 struct port_buffer *buf;
425 331
426 reclaim_dma_bufs(); 332 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
427
428 /*
429 * Allocate buffer and the sg list. The sg list array is allocated
430 * directly after the port_buffer struct.
431 */
432 buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
433 GFP_KERNEL);
434 if (!buf) 333 if (!buf)
435 goto fail; 334 goto fail;
436 335 buf->buf = kzalloc(buf_size, GFP_KERNEL);
437 buf->sgpages = pages;
438 if (pages > 0) {
439 buf->dev = NULL;
440 buf->buf = NULL;
441 return buf;
442 }
443
444 if (is_rproc_serial(vq->vdev)) {
445 /*
446 * Allocate DMA memory from ancestor. When a virtio
447 * device is created by remoteproc, the DMA memory is
448 * associated with the grandparent device:
449 * vdev => rproc => platform-dev.
450 * The code here would have been less quirky if
451 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
452 * in dma-coherent.c
453 */
454 if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
455 goto free_buf;
456 buf->dev = vq->vdev->dev.parent->parent;
457
458 /* Increase device refcnt to avoid freeing it */
459 get_device(buf->dev);
460 buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
461 GFP_KERNEL);
462 } else {
463 buf->dev = NULL;
464 buf->buf = kmalloc(buf_size, GFP_KERNEL);
465 }
466
467 if (!buf->buf) 336 if (!buf->buf)
468 goto free_buf; 337 goto free_buf;
469 buf->len = 0; 338 buf->len = 0;
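
Note: with the DMA and scatterlist bookkeeping stripped out above, a port buffer is again just a kmalloc'd descriptor plus a kzalloc'd data area, freed with two kfree() calls. A compact sketch of that shape, with illustrative names:

#include <linux/slab.h>

struct example_buf {
	char *buf;	/* data area handed to the virtqueue */
	size_t size;	/* allocated size */
	size_t len;	/* bytes of valid data */
	size_t offset;	/* read offset into buf */
};

static struct example_buf *example_alloc_buf(size_t buf_size)
{
	struct example_buf *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;
	buf->buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf->buf) {
		kfree(buf);
		return NULL;
	}
	buf->size = buf_size;
	buf->len = 0;
	buf->offset = 0;
	return buf;
}

static void example_free_buf(struct example_buf *buf)
{
	kfree(buf->buf);
	kfree(buf);
}
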
@@ -478,19 +347,17 @@ fail:
478} 347}
479 348
480/* Callers should take appropriate locks */ 349/* Callers should take appropriate locks */
481static struct port_buffer *get_inbuf(struct port *port) 350static void *get_inbuf(struct port *port)
482{ 351{
483 struct port_buffer *buf; 352 struct port_buffer *buf;
353 struct virtqueue *vq;
484 unsigned int len; 354 unsigned int len;
485 355
486 if (port->inbuf) 356 vq = port->in_vq;
487 return port->inbuf; 357 buf = virtqueue_get_buf(vq, &len);
488
489 buf = virtqueue_get_buf(port->in_vq, &len);
490 if (buf) { 358 if (buf) {
491 buf->len = len; 359 buf->len = len;
492 buf->offset = 0; 360 buf->offset = 0;
493 port->stats.bytes_received += len;
494 } 361 }
495 return buf; 362 return buf;
496} 363}
@@ -508,10 +375,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
508 375
509 sg_init_one(sg, buf->buf, buf->size); 376 sg_init_one(sg, buf->buf, buf->size);
510 377
511 ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC); 378 ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
512 virtqueue_kick(vq); 379 virtqueue_kick(vq);
513 if (!ret)
514 ret = vq->num_free;
515 return ret; 380 return ret;
516} 381}
517 382
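
Note: add_inbuf() above drops the gfp argument, using the older virtqueue_add_buf() variant seen on the right-hand side; as the send path later in this diff suggests, that variant returns a negative error or the remaining descriptor capacity (0 meaning the queue is now full). A hedged sketch of queueing one receive buffer with that API:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Queue one buffer for the host to fill; 'token' is what virtqueue_get_buf()
 * hands back later. Assumes the older 5-argument virtqueue_add_buf(). */
static int example_queue_rx(struct virtqueue *vq, void *data, size_t len,
			    void *token)
{
	struct scatterlist sg[1];
	int ret;

	sg_init_one(sg, data, len);
	ret = virtqueue_add_buf(vq, sg, 0, 1, token);	/* 0 out, 1 in */
	virtqueue_kick(vq);				/* notify the host */
	return ret;
}
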
@@ -519,27 +384,32 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
519static void discard_port_data(struct port *port) 384static void discard_port_data(struct port *port)
520{ 385{
521 struct port_buffer *buf; 386 struct port_buffer *buf;
522 unsigned int err; 387 struct virtqueue *vq;
388 unsigned int len;
389 int ret;
523 390
524 if (!port->portdev) { 391 if (!port->portdev) {
525 /* Device has been unplugged. vqs are already gone. */ 392 /* Device has been unplugged. vqs are already gone. */
526 return; 393 return;
527 } 394 }
528 buf = get_inbuf(port); 395 vq = port->in_vq;
396 if (port->inbuf)
397 buf = port->inbuf;
398 else
399 buf = virtqueue_get_buf(vq, &len);
529 400
530 err = 0; 401 ret = 0;
531 while (buf) { 402 while (buf) {
532 port->stats.bytes_discarded += buf->len - buf->offset; 403 if (add_inbuf(vq, buf) < 0) {
533 if (add_inbuf(port->in_vq, buf) < 0) { 404 ret++;
534 err++; 405 free_buf(buf);
535 free_buf(buf, false);
536 } 406 }
537 port->inbuf = NULL; 407 buf = virtqueue_get_buf(vq, &len);
538 buf = get_inbuf(port);
539 } 408 }
540 if (err) 409 port->inbuf = NULL;
410 if (ret)
541 dev_warn(port->dev, "Errors adding %d buffers back to vq\n", 411 dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
542 err); 412 ret);
543} 413}
544 414
545static bool port_has_data(struct port *port) 415static bool port_has_data(struct port *port)
@@ -547,12 +417,18 @@ static bool port_has_data(struct port *port)
547 unsigned long flags; 417 unsigned long flags;
548 bool ret; 418 bool ret;
549 419
550 ret = false;
551 spin_lock_irqsave(&port->inbuf_lock, flags); 420 spin_lock_irqsave(&port->inbuf_lock, flags);
421 if (port->inbuf) {
422 ret = true;
423 goto out;
424 }
552 port->inbuf = get_inbuf(port); 425 port->inbuf = get_inbuf(port);
553 if (port->inbuf) 426 if (port->inbuf) {
554 ret = true; 427 ret = true;
555 428 goto out;
429 }
430 ret = false;
431out:
556 spin_unlock_irqrestore(&port->inbuf_lock, flags); 432 spin_unlock_irqrestore(&port->inbuf_lock, flags);
557 return ret; 433 return ret;
558} 434}
@@ -575,7 +451,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
575 vq = portdev->c_ovq; 451 vq = portdev->c_ovq;
576 452
577 sg_init_one(sg, &cpkt, sizeof(cpkt)); 453 sg_init_one(sg, &cpkt, sizeof(cpkt));
578 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { 454 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
579 virtqueue_kick(vq); 455 virtqueue_kick(vq);
580 while (!virtqueue_get_buf(vq, &len)) 456 while (!virtqueue_get_buf(vq, &len))
581 cpu_relax(); 457 cpu_relax();
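
Note: __send_control_msg() keeps its synchronous shape in both versions: add the control packet to the control-out vq, kick, then busy-wait until the host consumes it. Sketched below with the same gfp-less API as the right-hand side; names are illustrative.

#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <asm/processor.h>

/* Send a small control structure and spin until the host has seen it
 * (mirrors the shape of __send_control_msg() in the hunk above). */
static void example_send_ctrl(struct virtqueue *vq, void *cpkt, size_t len)
{
	struct scatterlist sg[1];
	unsigned int used_len;

	sg_init_one(sg, cpkt, len);
	if (virtqueue_add_buf(vq, sg, 1, 0, cpkt) >= 0) {	/* 1 out, 0 in */
		virtqueue_kick(vq);
		while (!virtqueue_get_buf(vq, &used_len))
			cpu_relax();	/* wait for the host to consume it */
	}
}
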
@@ -592,11 +468,10 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
592 return 0; 468 return 0;
593} 469}
594 470
595
596/* Callers must take the port->outvq_lock */ 471/* Callers must take the port->outvq_lock */
597static void reclaim_consumed_buffers(struct port *port) 472static void reclaim_consumed_buffers(struct port *port)
598{ 473{
599 struct port_buffer *buf; 474 void *buf;
600 unsigned int len; 475 unsigned int len;
601 476
602 if (!port->portdev) { 477 if (!port->portdev) {
@@ -604,17 +479,17 @@ static void reclaim_consumed_buffers(struct port *port)
604 return; 479 return;
605 } 480 }
606 while ((buf = virtqueue_get_buf(port->out_vq, &len))) { 481 while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
607 free_buf(buf, false); 482 kfree(buf);
608 port->outvq_full = false; 483 port->outvq_full = false;
609 } 484 }
610} 485}
611 486
612static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, 487static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
613 int nents, size_t in_count, 488 bool nonblock)
614 void *data, bool nonblock)
615{ 489{
490 struct scatterlist sg[1];
616 struct virtqueue *out_vq; 491 struct virtqueue *out_vq;
617 int err; 492 ssize_t ret;
618 unsigned long flags; 493 unsigned long flags;
619 unsigned int len; 494 unsigned int len;
620 495
@@ -624,17 +499,18 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
624 499
625 reclaim_consumed_buffers(port); 500 reclaim_consumed_buffers(port);
626 501
627 err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC); 502 sg_init_one(sg, in_buf, in_count);
503 ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
628 504
629 /* Tell Host to go! */ 505 /* Tell Host to go! */
630 virtqueue_kick(out_vq); 506 virtqueue_kick(out_vq);
631 507
632 if (err) { 508 if (ret < 0) {
633 in_count = 0; 509 in_count = 0;
634 goto done; 510 goto done;
635 } 511 }
636 512
637 if (out_vq->num_free == 0) 513 if (ret == 0)
638 port->outvq_full = true; 514 port->outvq_full = true;
639 515
640 if (nonblock) 516 if (nonblock)
@@ -653,8 +529,6 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
653 cpu_relax(); 529 cpu_relax();
654done: 530done:
655 spin_unlock_irqrestore(&port->outvq_lock, flags); 531 spin_unlock_irqrestore(&port->outvq_lock, flags);
656
657 port->stats.bytes_sent += in_count;
658 /* 532 /*
659 * We're expected to return the amount of data we wrote -- all 533 * We're expected to return the amount of data we wrote -- all
660 * of it 534 * of it
@@ -759,8 +633,8 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
759 if (filp->f_flags & O_NONBLOCK) 633 if (filp->f_flags & O_NONBLOCK)
760 return -EAGAIN; 634 return -EAGAIN;
761 635
762 ret = wait_event_freezable(port->waitqueue, 636 ret = wait_event_interruptible(port->waitqueue,
763 !will_read_block(port)); 637 !will_read_block(port));
764 if (ret < 0) 638 if (ret < 0)
765 return ret; 639 return ret;
766 } 640 }
@@ -783,34 +657,13 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
783 return fill_readbuf(port, ubuf, count, true); 657 return fill_readbuf(port, ubuf, count, true);
784} 658}
785 659
786static int wait_port_writable(struct port *port, bool nonblock)
787{
788 int ret;
789
790 if (will_write_block(port)) {
791 if (nonblock)
792 return -EAGAIN;
793
794 ret = wait_event_freezable(port->waitqueue,
795 !will_write_block(port));
796 if (ret < 0)
797 return ret;
798 }
799 /* Port got hot-unplugged. */
800 if (!port->guest_connected)
801 return -ENODEV;
802
803 return 0;
804}
805
806static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, 660static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
807 size_t count, loff_t *offp) 661 size_t count, loff_t *offp)
808{ 662{
809 struct port *port; 663 struct port *port;
810 struct port_buffer *buf; 664 char *buf;
811 ssize_t ret; 665 ssize_t ret;
812 bool nonblock; 666 bool nonblock;
813 struct scatterlist sg[1];
814 667
815 /* Userspace could be out to fool us */ 668 /* Userspace could be out to fool us */
816 if (!count) 669 if (!count)
@@ -820,17 +673,26 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
820 673
821 nonblock = filp->f_flags & O_NONBLOCK; 674 nonblock = filp->f_flags & O_NONBLOCK;
822 675
823 ret = wait_port_writable(port, nonblock); 676 if (will_write_block(port)) {
824 if (ret < 0) 677 if (nonblock)
825 return ret; 678 return -EAGAIN;
679
680 ret = wait_event_interruptible(port->waitqueue,
681 !will_write_block(port));
682 if (ret < 0)
683 return ret;
684 }
685 /* Port got hot-unplugged. */
686 if (!port->guest_connected)
687 return -ENODEV;
826 688
827 count = min((size_t)(32 * 1024), count); 689 count = min((size_t)(32 * 1024), count);
828 690
829 buf = alloc_buf(port->out_vq, count, 0); 691 buf = kmalloc(count, GFP_KERNEL);
830 if (!buf) 692 if (!buf)
831 return -ENOMEM; 693 return -ENOMEM;
832 694
833 ret = copy_from_user(buf->buf, ubuf, count); 695 ret = copy_from_user(buf, ubuf, count);
834 if (ret) { 696 if (ret) {
835 ret = -EFAULT; 697 ret = -EFAULT;
836 goto free_buf; 698 goto free_buf;
@@ -844,118 +706,17 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
844 * through to the host. 706 * through to the host.
845 */ 707 */
846 nonblock = true; 708 nonblock = true;
847 sg_init_one(sg, buf->buf, count); 709 ret = send_buf(port, buf, count, nonblock);
848 ret = __send_to_port(port, sg, 1, count, buf, nonblock);
849 710
850 if (nonblock && ret > 0) 711 if (nonblock && ret > 0)
851 goto out; 712 goto out;
852 713
853free_buf: 714free_buf:
854 free_buf(buf, true); 715 kfree(buf);
855out: 716out:
856 return ret; 717 return ret;
857} 718}
858 719
859struct sg_list {
860 unsigned int n;
861 unsigned int size;
862 size_t len;
863 struct scatterlist *sg;
864};
865
866static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
867 struct splice_desc *sd)
868{
869 struct sg_list *sgl = sd->u.data;
870 unsigned int offset, len;
871
872 if (sgl->n == sgl->size)
873 return 0;
874
875 /* Try lock this page */
876 if (buf->ops->steal(pipe, buf) == 0) {
877 /* Get reference and unlock page for moving */
878 get_page(buf->page);
879 unlock_page(buf->page);
880
881 len = min(buf->len, sd->len);
882 sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
883 } else {
884 /* Failback to copying a page */
885 struct page *page = alloc_page(GFP_KERNEL);
886 char *src = buf->ops->map(pipe, buf, 1);
887 char *dst;
888
889 if (!page)
890 return -ENOMEM;
891 dst = kmap(page);
892
893 offset = sd->pos & ~PAGE_MASK;
894
895 len = sd->len;
896 if (len + offset > PAGE_SIZE)
897 len = PAGE_SIZE - offset;
898
899 memcpy(dst + offset, src + buf->offset, len);
900
901 kunmap(page);
902 buf->ops->unmap(pipe, buf, src);
903
904 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
905 }
906 sgl->n++;
907 sgl->len += len;
908
909 return len;
910}
911
912/* Faster zero-copy write by splicing */
913static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
914 struct file *filp, loff_t *ppos,
915 size_t len, unsigned int flags)
916{
917 struct port *port = filp->private_data;
918 struct sg_list sgl;
919 ssize_t ret;
920 struct port_buffer *buf;
921 struct splice_desc sd = {
922 .total_len = len,
923 .flags = flags,
924 .pos = *ppos,
925 .u.data = &sgl,
926 };
927
928 /*
929 * Rproc_serial does not yet support splice. To support splice
930 * pipe_to_sg() must allocate dma-buffers and copy content from
931 * regular pages to dma pages. And alloc_buf and free_buf must
932 * support allocating and freeing such a list of dma-buffers.
933 */
934 if (is_rproc_serial(port->out_vq->vdev))
935 return -EINVAL;
936
937 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
938 if (ret < 0)
939 return ret;
940
941 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
942 if (!buf)
943 return -ENOMEM;
944
945 sgl.n = 0;
946 sgl.len = 0;
947 sgl.size = pipe->nrbufs;
948 sgl.sg = buf->sg;
949 sg_init_table(sgl.sg, sgl.size);
950 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
951 if (likely(ret > 0))
952 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
953
954 if (unlikely(ret <= 0))
955 free_buf(buf, true);
956 return ret;
957}
958
959static unsigned int port_fops_poll(struct file *filp, poll_table *wait) 720static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
960{ 721{
961 struct port *port; 722 struct port *port;
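
Note: with the splice/scatterlist write path removed above, port_fops_write() falls back to a plain bounce buffer: cap the size at 32K, kmalloc, copy_from_user, then hand the buffer to the out vq (ownership passes to the vq on a successful non-blocking send, so it is only freed on failure). A trimmed sketch of the copy-in half of that pattern, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Copy up to 32K from userspace into a kernel bounce buffer. The caller
 * would pass *out on to the out virtqueue (as send_buf() does in the hunk
 * above) and free it only if queueing fails. */
static ssize_t example_copy_write(const char __user *ubuf, size_t count,
				  char **out)
{
	char *buf;

	count = min((size_t)(32 * 1024), count);
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}
	*out = buf;
	return count;
}
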
@@ -1001,7 +762,6 @@ static int port_fops_release(struct inode *inode, struct file *filp)
1001 reclaim_consumed_buffers(port); 762 reclaim_consumed_buffers(port);
1002 spin_unlock_irq(&port->outvq_lock); 763 spin_unlock_irq(&port->outvq_lock);
1003 764
1004 reclaim_dma_bufs();
1005 /* 765 /*
1006 * Locks aren't necessary here as a port can't be opened after 766 * Locks aren't necessary here as a port can't be opened after
1007 * unplug, and if a port isn't unplugged, a kref would already 767 * unplug, and if a port isn't unplugged, a kref would already
@@ -1088,7 +848,6 @@ static const struct file_operations port_fops = {
1088 .open = port_fops_open, 848 .open = port_fops_open,
1089 .read = port_fops_read, 849 .read = port_fops_read,
1090 .write = port_fops_write, 850 .write = port_fops_write,
1091 .splice_write = port_fops_splice_write,
1092 .poll = port_fops_poll, 851 .poll = port_fops_poll,
1093 .release = port_fops_release, 852 .release = port_fops_release,
1094 .fasync = port_fops_fasync, 853 .fasync = port_fops_fasync,
@@ -1106,7 +865,6 @@ static const struct file_operations port_fops = {
1106static int put_chars(u32 vtermno, const char *buf, int count) 865static int put_chars(u32 vtermno, const char *buf, int count)
1107{ 866{
1108 struct port *port; 867 struct port *port;
1109 struct scatterlist sg[1];
1110 868
1111 if (unlikely(early_put_chars)) 869 if (unlikely(early_put_chars))
1112 return early_put_chars(vtermno, buf, count); 870 return early_put_chars(vtermno, buf, count);
@@ -1115,8 +873,7 @@ static int put_chars(u32 vtermno, const char *buf, int count)
1115 if (!port) 873 if (!port)
1116 return -EPIPE; 874 return -EPIPE;
1117 875
1118 sg_init_one(sg, buf, count); 876 return send_buf(port, (void *)buf, count, false);
1119 return __send_to_port(port, sg, 1, count, (void *)buf, false);
1120} 877}
1121 878
1122/* 879/*
@@ -1153,10 +910,7 @@ static void resize_console(struct port *port)
1153 return; 910 return;
1154 911
1155 vdev = port->portdev->vdev; 912 vdev = port->portdev->vdev;
1156 913 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
1157 /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
1158 if (!is_rproc_serial(vdev) &&
1159 virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
1160 hvc_resize(port->cons.hvc, port->cons.ws); 914 hvc_resize(port->cons.hvc, port->cons.ws);
1161} 915}
1162 916
@@ -1276,6 +1030,12 @@ static struct attribute_group port_attribute_group = {
1276 .attrs = port_sysfs_entries, 1030 .attrs = port_sysfs_entries,
1277}; 1031};
1278 1032
1033static int debugfs_open(struct inode *inode, struct file *filp)
1034{
1035 filp->private_data = inode->i_private;
1036 return 0;
1037}
1038
1279static ssize_t debugfs_read(struct file *filp, char __user *ubuf, 1039static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1280 size_t count, loff_t *offp) 1040 size_t count, loff_t *offp)
1281{ 1041{
@@ -1299,14 +1059,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1299 out_offset += snprintf(buf + out_offset, out_count - out_offset, 1059 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1300 "outvq_full: %d\n", port->outvq_full); 1060 "outvq_full: %d\n", port->outvq_full);
1301 out_offset += snprintf(buf + out_offset, out_count - out_offset, 1061 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1302 "bytes_sent: %lu\n", port->stats.bytes_sent);
1303 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1304 "bytes_received: %lu\n",
1305 port->stats.bytes_received);
1306 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1307 "bytes_discarded: %lu\n",
1308 port->stats.bytes_discarded);
1309 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1310 "is_console: %s\n", 1062 "is_console: %s\n",
1311 is_console_port(port) ? "yes" : "no"); 1063 is_console_port(port) ? "yes" : "no");
1312 out_offset += snprintf(buf + out_offset, out_count - out_offset, 1064 out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -1319,7 +1071,7 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1319 1071
1320static const struct file_operations port_debugfs_ops = { 1072static const struct file_operations port_debugfs_ops = {
1321 .owner = THIS_MODULE, 1073 .owner = THIS_MODULE,
1322 .open = simple_open, 1074 .open = debugfs_open,
1323 .read = debugfs_read, 1075 .read = debugfs_read,
1324}; 1076};
1325 1077
@@ -1340,7 +1092,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1340 1092
1341 nr_added_bufs = 0; 1093 nr_added_bufs = 0;
1342 do { 1094 do {
1343 buf = alloc_buf(vq, PAGE_SIZE, 0); 1095 buf = alloc_buf(PAGE_SIZE);
1344 if (!buf) 1096 if (!buf)
1345 break; 1097 break;
1346 1098
@@ -1348,7 +1100,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1348 ret = add_inbuf(vq, buf); 1100 ret = add_inbuf(vq, buf);
1349 if (ret < 0) { 1101 if (ret < 0) {
1350 spin_unlock_irq(lock); 1102 spin_unlock_irq(lock);
1351 free_buf(buf, true); 1103 free_buf(buf);
1352 break; 1104 break;
1353 } 1105 }
1354 nr_added_bufs++; 1106 nr_added_bufs++;
@@ -1391,7 +1143,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1391 port->cons.ws.ws_row = port->cons.ws.ws_col = 0; 1143 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1392 1144
1393 port->host_connected = port->guest_connected = false; 1145 port->host_connected = port->guest_connected = false;
1394 port->stats = (struct port_stats) { 0 };
1395 1146
1396 port->outvq_full = false; 1147 port->outvq_full = false;
1397 1148
@@ -1436,18 +1187,10 @@ static int add_port(struct ports_device *portdev, u32 id)
1436 goto free_device; 1187 goto free_device;
1437 } 1188 }
1438 1189
1439 if (is_rproc_serial(port->portdev->vdev)) 1190 /*
1440 /* 1191 * If we're not using multiport support, this has to be a console port
1441 * For rproc_serial assume remote processor is connected. 1192 */
1442 * rproc_serial does not want the console port, only 1193 if (!use_multiport(port->portdev)) {
1443 * the generic port implementation.
1444 */
1445 port->host_connected = true;
1446 else if (!use_multiport(port->portdev)) {
1447 /*
1448 * If we're not using multiport support,
1449 * this has to be a console port.
1450 */
1451 err = init_port_console(port); 1194 err = init_port_console(port);
1452 if (err) 1195 if (err)
1453 goto free_inbufs; 1196 goto free_inbufs;
@@ -1480,7 +1223,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1480 1223
1481free_inbufs: 1224free_inbufs:
1482 while ((buf = virtqueue_detach_unused_buf(port->in_vq))) 1225 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1483 free_buf(buf, true); 1226 free_buf(buf);
1484free_device: 1227free_device:
1485 device_destroy(pdrvdata.class, port->dev->devt); 1228 device_destroy(pdrvdata.class, port->dev->devt);
1486free_cdev: 1229free_cdev:
@@ -1511,24 +1254,6 @@ static void remove_port(struct kref *kref)
1511 kfree(port); 1254 kfree(port);
1512} 1255}
1513 1256
1514static void remove_port_data(struct port *port)
1515{
1516 struct port_buffer *buf;
1517
1518 /* Remove unused data this port might have received. */
1519 discard_port_data(port);
1520
1521 reclaim_consumed_buffers(port);
1522
1523 /* Remove buffers we queued up for the Host to send us data in. */
1524 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1525 free_buf(buf, true);
1526
1527 /* Free pending buffers from the out-queue. */
1528 while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
1529 free_buf(buf, true);
1530}
1531
1532/* 1257/*
1533 * Port got unplugged. Remove port from portdev's list and drop the 1258 * Port got unplugged. Remove port from portdev's list and drop the
1534 * kref reference. If no userspace has this port opened, it will 1259 * kref reference. If no userspace has this port opened, it will
@@ -1536,6 +1261,8 @@ static void remove_port_data(struct port *port)
1536 */ 1261 */
1537static void unplug_port(struct port *port) 1262static void unplug_port(struct port *port)
1538{ 1263{
1264 struct port_buffer *buf;
1265
1539 spin_lock_irq(&port->portdev->ports_lock); 1266 spin_lock_irq(&port->portdev->ports_lock);
1540 list_del(&port->list); 1267 list_del(&port->list);
1541 spin_unlock_irq(&port->portdev->ports_lock); 1268 spin_unlock_irq(&port->portdev->ports_lock);
@@ -1556,7 +1283,14 @@ static void unplug_port(struct port *port)
1556 hvc_remove(port->cons.hvc); 1283 hvc_remove(port->cons.hvc);
1557 } 1284 }
1558 1285
1559 remove_port_data(port); 1286 /* Remove unused data this port might have received. */
1287 discard_port_data(port);
1288
1289 reclaim_consumed_buffers(port);
1290
1291 /* Remove buffers we queued up for the Host to send us data in. */
1292 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1293 free_buf(buf);
1560 1294
1561 /* 1295 /*
1562 * We should just assume the device itself has gone off -- 1296 * We should just assume the device itself has gone off --
@@ -1618,7 +1352,6 @@ static void handle_control_message(struct ports_device *portdev,
1618 break; 1352 break;
1619 1353
1620 init_port_console(port); 1354 init_port_console(port);
1621 complete(&early_console_added);
1622 /* 1355 /*
1623 * Could remove the port here in case init fails - but 1356 * Could remove the port here in case init fails - but
1624 * have to notify the host first. 1357 * have to notify the host first.
@@ -1661,13 +1394,6 @@ static void handle_control_message(struct ports_device *portdev,
1661 break; 1394 break;
1662 case VIRTIO_CONSOLE_PORT_NAME: 1395 case VIRTIO_CONSOLE_PORT_NAME:
1663 /* 1396 /*
1664 * If we woke up after hibernation, we can get this
1665 * again. Skip it in that case.
1666 */
1667 if (port->name)
1668 break;
1669
1670 /*
1671 * Skip the size of the header and the cpkt to get the size 1397 * Skip the size of the header and the cpkt to get the size
1672 * of the name that was sent 1398 * of the name that was sent
1673 */ 1399 */
@@ -1728,7 +1454,7 @@ static void control_work_handler(struct work_struct *work)
1728 if (add_inbuf(portdev->c_ivq, buf) < 0) { 1454 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1729 dev_warn(&portdev->vdev->dev, 1455 dev_warn(&portdev->vdev->dev,
1730 "Error adding buffer to queue\n"); 1456 "Error adding buffer to queue\n");
1731 free_buf(buf, false); 1457 free_buf(buf);
1732 } 1458 }
1733 } 1459 }
1734 spin_unlock(&portdev->cvq_lock); 1460 spin_unlock(&portdev->cvq_lock);
@@ -1755,7 +1481,8 @@ static void in_intr(struct virtqueue *vq)
1755 return; 1481 return;
1756 1482
1757 spin_lock_irqsave(&port->inbuf_lock, flags); 1483 spin_lock_irqsave(&port->inbuf_lock, flags);
1758 port->inbuf = get_inbuf(port); 1484 if (!port->inbuf)
1485 port->inbuf = get_inbuf(port);
1759 1486
1760 /* 1487 /*
1761 * Don't queue up data when port is closed. This condition 1488 * Don't queue up data when port is closed. This condition
@@ -1836,7 +1563,7 @@ static int init_vqs(struct ports_device *portdev)
1836 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), 1563 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1837 GFP_KERNEL); 1564 GFP_KERNEL);
1838 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || 1565 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
1839 !portdev->out_vqs) { 1566 !portdev->out_vqs) {
1840 err = -ENOMEM; 1567 err = -ENOMEM;
1841 goto free; 1568 goto free;
1842 } 1569 }
@@ -1908,28 +1635,6 @@ static const struct file_operations portdev_fops = {
1908 .owner = THIS_MODULE, 1635 .owner = THIS_MODULE,
1909}; 1636};
1910 1637
1911static void remove_vqs(struct ports_device *portdev)
1912{
1913 portdev->vdev->config->del_vqs(portdev->vdev);
1914 kfree(portdev->in_vqs);
1915 kfree(portdev->out_vqs);
1916}
1917
1918static void remove_controlq_data(struct ports_device *portdev)
1919{
1920 struct port_buffer *buf;
1921 unsigned int len;
1922
1923 if (!use_multiport(portdev))
1924 return;
1925
1926 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1927 free_buf(buf, true);
1928
1929 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1930 free_buf(buf, true);
1931}
1932
1933/* 1638/*
1934 * Once we're further in boot, we get probed like any other virtio 1639 * Once we're further in boot, we get probed like any other virtio
1935 * device. 1640 * device.
@@ -1938,15 +1643,11 @@ static void remove_controlq_data(struct ports_device *portdev)
1938 * config space to see how many ports the host has spawned. We 1643 * config space to see how many ports the host has spawned. We
1939 * initialize each port found. 1644 * initialize each port found.
1940 */ 1645 */
1941static int virtcons_probe(struct virtio_device *vdev) 1646static int __devinit virtcons_probe(struct virtio_device *vdev)
1942{ 1647{
1943 struct ports_device *portdev; 1648 struct ports_device *portdev;
1944 int err; 1649 int err;
1945 bool multiport; 1650 bool multiport;
1946 bool early = early_put_chars != NULL;
1947
1948 /* Ensure to read early_put_chars now */
1949 barrier();
1950 1651
1951 portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); 1652 portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
1952 if (!portdev) { 1653 if (!portdev) {
@@ -1974,14 +1675,12 @@ static int virtcons_probe(struct virtio_device *vdev)
1974 1675
1975 multiport = false; 1676 multiport = false;
1976 portdev->config.max_nr_ports = 1; 1677 portdev->config.max_nr_ports = 1;
1977 1678 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
1978 /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
1979 if (!is_rproc_serial(vdev) &&
1980 virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
1981 offsetof(struct virtio_console_config,
1982 max_nr_ports),
1983 &portdev->config.max_nr_ports) == 0) {
1984 multiport = true; 1679 multiport = true;
1680 vdev->config->get(vdev, offsetof(struct virtio_console_config,
1681 max_nr_ports),
1682 &portdev->config.max_nr_ports,
1683 sizeof(portdev->config.max_nr_ports));
1985 } 1684 }
1986 1685
1987 err = init_vqs(portdev); 1686 err = init_vqs(portdev);
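
Note: the probe path above reverts from virtio_config_val() (and the rproc special-casing) to an explicit virtio_has_feature() test followed by a raw config-space read. A short sketch mirroring the right-hand side of the hunk:

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>

/* Returns the advertised port count, defaulting to 1 when the multiport
 * feature was not negotiated. */
static u32 example_max_ports(struct virtio_device *vdev)
{
	u32 max_nr_ports = 1;

	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))
		vdev->config->get(vdev,
				  offsetof(struct virtio_console_config,
					   max_nr_ports),
				  &max_nr_ports, sizeof(max_nr_ports));
	return max_nr_ports;
}
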
@@ -2020,26 +1719,15 @@ static int virtcons_probe(struct virtio_device *vdev)
2020 1719
2021 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, 1720 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2022 VIRTIO_CONSOLE_DEVICE_READY, 1); 1721 VIRTIO_CONSOLE_DEVICE_READY, 1);
2023
2024 /*
2025 * If there was an early virtio console, assume that there are no
2026 * other consoles. We need to wait until the hvc_alloc matches the
2027 * hvc_instantiate, otherwise tty_open will complain, resulting in
2028 * a "Warning: unable to open an initial console" boot failure.
2029 * Without multiport this is done in add_port above. With multiport
2030 * this might take some host<->guest communication - thus we have to
2031 * wait.
2032 */
2033 if (multiport && early)
2034 wait_for_completion(&early_console_added);
2035
2036 return 0; 1722 return 0;
2037 1723
2038free_vqs: 1724free_vqs:
2039 /* The host might want to notify mgmt sw about device add failure */ 1725 /* The host might want to notify mgmt sw about device add failure */
2040 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, 1726 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2041 VIRTIO_CONSOLE_DEVICE_READY, 0); 1727 VIRTIO_CONSOLE_DEVICE_READY, 0);
2042 remove_vqs(portdev); 1728 vdev->config->del_vqs(vdev);
1729 kfree(portdev->in_vqs);
1730 kfree(portdev->out_vqs);
2043free_chrdev: 1731free_chrdev:
2044 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 1732 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2045free: 1733free:
@@ -2077,8 +1765,21 @@ static void virtcons_remove(struct virtio_device *vdev)
2077 * have to just stop using the port, as the vqs are going 1765 * have to just stop using the port, as the vqs are going
2078 * away. 1766 * away.
2079 */ 1767 */
2080 remove_controlq_data(portdev); 1768 if (use_multiport(portdev)) {
2081 remove_vqs(portdev); 1769 struct port_buffer *buf;
1770 unsigned int len;
1771
1772 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1773 free_buf(buf);
1774
1775 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1776 free_buf(buf);
1777 }
1778
1779 vdev->config->del_vqs(vdev);
1780 kfree(portdev->in_vqs);
1781 kfree(portdev->out_vqs);
1782
2082 kfree(portdev); 1783 kfree(portdev);
2083} 1784}
2084 1785
@@ -2092,85 +1793,6 @@ static unsigned int features[] = {
2092 VIRTIO_CONSOLE_F_MULTIPORT, 1793 VIRTIO_CONSOLE_F_MULTIPORT,
2093}; 1794};
2094 1795
2095static struct virtio_device_id rproc_serial_id_table[] = {
2096#if IS_ENABLED(CONFIG_REMOTEPROC)
2097 { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
2098#endif
2099 { 0 },
2100};
2101
2102static unsigned int rproc_serial_features[] = {
2103};
2104
2105#ifdef CONFIG_PM
2106static int virtcons_freeze(struct virtio_device *vdev)
2107{
2108 struct ports_device *portdev;
2109 struct port *port;
2110
2111 portdev = vdev->priv;
2112
2113 vdev->config->reset(vdev);
2114
2115 virtqueue_disable_cb(portdev->c_ivq);
2116 cancel_work_sync(&portdev->control_work);
2117 /*
2118 * Once more: if control_work_handler() was running, it would
2119 * enable the cb as the last step.
2120 */
2121 virtqueue_disable_cb(portdev->c_ivq);
2122 remove_controlq_data(portdev);
2123
2124 list_for_each_entry(port, &portdev->ports, list) {
2125 virtqueue_disable_cb(port->in_vq);
2126 virtqueue_disable_cb(port->out_vq);
2127 /*
2128 * We'll ask the host later if the new invocation has
2129 * the port opened or closed.
2130 */
2131 port->host_connected = false;
2132 remove_port_data(port);
2133 }
2134 remove_vqs(portdev);
2135
2136 return 0;
2137}
2138
2139static int virtcons_restore(struct virtio_device *vdev)
2140{
2141 struct ports_device *portdev;
2142 struct port *port;
2143 int ret;
2144
2145 portdev = vdev->priv;
2146
2147 ret = init_vqs(portdev);
2148 if (ret)
2149 return ret;
2150
2151 if (use_multiport(portdev))
2152 fill_queue(portdev->c_ivq, &portdev->cvq_lock);
2153
2154 list_for_each_entry(port, &portdev->ports, list) {
2155 port->in_vq = portdev->in_vqs[port->id];
2156 port->out_vq = portdev->out_vqs[port->id];
2157
2158 fill_queue(port->in_vq, &port->inbuf_lock);
2159
2160 /* Get port open/close status on the host */
2161 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
2162
2163 /*
2164 * If a port was open at the time of suspending, we
2165 * have to let the host know that it's still open.
2166 */
2167 if (port->guest_connected)
2168 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
2169 }
2170 return 0;
2171}
2172#endif
2173
2174static struct virtio_driver virtio_console = { 1796static struct virtio_driver virtio_console = {
2175 .feature_table = features, 1797 .feature_table = features,
2176 .feature_table_size = ARRAY_SIZE(features), 1798 .feature_table_size = ARRAY_SIZE(features),
@@ -2180,20 +1802,6 @@ static struct virtio_driver virtio_console = {
2180 .probe = virtcons_probe, 1802 .probe = virtcons_probe,
2181 .remove = virtcons_remove, 1803 .remove = virtcons_remove,
2182 .config_changed = config_intr, 1804 .config_changed = config_intr,
2183#ifdef CONFIG_PM
2184 .freeze = virtcons_freeze,
2185 .restore = virtcons_restore,
2186#endif
2187};
2188
2189static struct virtio_driver virtio_rproc_serial = {
2190 .feature_table = rproc_serial_features,
2191 .feature_table_size = ARRAY_SIZE(rproc_serial_features),
2192 .driver.name = "virtio_rproc_serial",
2193 .driver.owner = THIS_MODULE,
2194 .id_table = rproc_serial_id_table,
2195 .probe = virtcons_probe,
2196 .remove = virtcons_remove,
2197}; 1805};
2198 1806
2199static int __init init(void) 1807static int __init init(void)
@@ -2215,33 +1823,12 @@ static int __init init(void)
2215 INIT_LIST_HEAD(&pdrvdata.consoles); 1823 INIT_LIST_HEAD(&pdrvdata.consoles);
2216 INIT_LIST_HEAD(&pdrvdata.portdevs); 1824 INIT_LIST_HEAD(&pdrvdata.portdevs);
2217 1825
2218 err = register_virtio_driver(&virtio_console); 1826 return register_virtio_driver(&virtio_console);
2219 if (err < 0) {
2220 pr_err("Error %d registering virtio driver\n", err);
2221 goto free;
2222 }
2223 err = register_virtio_driver(&virtio_rproc_serial);
2224 if (err < 0) {
2225 pr_err("Error %d registering virtio rproc serial driver\n",
2226 err);
2227 goto unregister;
2228 }
2229 return 0;
2230unregister:
2231 unregister_virtio_driver(&virtio_console);
2232free:
2233 if (pdrvdata.debugfs_dir)
2234 debugfs_remove_recursive(pdrvdata.debugfs_dir);
2235 class_destroy(pdrvdata.class);
2236 return err;
2237} 1827}
2238 1828
2239static void __exit fini(void) 1829static void __exit fini(void)
2240{ 1830{
2241 reclaim_dma_bufs();
2242
2243 unregister_virtio_driver(&virtio_console); 1831 unregister_virtio_driver(&virtio_console);
2244 unregister_virtio_driver(&virtio_rproc_serial);
2245 1832
2246 class_destroy(pdrvdata.class); 1833 class_destroy(pdrvdata.class);
2247 if (pdrvdata.debugfs_dir) 1834 if (pdrvdata.debugfs_dir)
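
Note: with the rproc_serial driver removed, module init/exit collapse back to registering and unregistering a single virtio driver. Minimal sketch of that shape (virtio_console here stands for the file's own struct virtio_driver; the real init also sets up the class and debugfs directory shown in the surrounding context):

#include <linux/module.h>
#include <linux/virtio.h>

static int __init example_init(void)
{
	return register_virtio_driver(&virtio_console);
}

static void __exit example_fini(void)
{
	unregister_virtio_driver(&virtio_console);
}

module_init(example_init);
module_exit(example_fini);
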
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 5224da5202d..e90e1c74fd4 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -89,6 +89,7 @@
89 89
90#include <asm/io.h> 90#include <asm/io.h>
91#include <asm/uaccess.h> 91#include <asm/uaccess.h>
92#include <asm/system.h>
92 93
93#ifdef CONFIG_OF 94#ifdef CONFIG_OF
94/* For open firmware. */ 95/* For open firmware. */
@@ -167,7 +168,6 @@ static const struct config_registers v4_config_registers = {
167 .BOOTSTS = UNIMPLEMENTED, 168 .BOOTSTS = UNIMPLEMENTED,
168 .CTL_1 = UNIMPLEMENTED, 169 .CTL_1 = UNIMPLEMENTED,
169}; 170};
170
171static const struct config_registers v5_config_registers = { 171static const struct config_registers v5_config_registers = {
172 .CRC = 0, 172 .CRC = 0,
173 .FAR = 1, 173 .FAR = 1,
@@ -193,31 +193,6 @@ static const struct config_registers v5_config_registers = {
193 .CTL_1 = 19, 193 .CTL_1 = 19,
194}; 194};
195 195
196static const struct config_registers v6_config_registers = {
197 .CRC = 0,
198 .FAR = 1,
199 .FDRI = 2,
200 .FDRO = 3,
201 .CMD = 4,
202 .CTL = 5,
203 .MASK = 6,
204 .STAT = 7,
205 .LOUT = 8,
206 .COR = 9,
207 .MFWR = 10,
208 .FLR = UNIMPLEMENTED,
209 .KEY = UNIMPLEMENTED,
210 .CBC = 11,
211 .IDCODE = 12,
212 .AXSS = 13,
213 .C0R_1 = 14,
214 .CSOB = 15,
215 .WBSTAR = 16,
216 .TIMER = 17,
217 .BOOTSTS = 22,
218 .CTL_1 = 24,
219};
220
221/** 196/**
222 * hwicap_command_desync - Send a DESYNC command to the ICAP port. 197 * hwicap_command_desync - Send a DESYNC command to the ICAP port.
223 * @drvdata: a pointer to the drvdata. 198 * @drvdata: a pointer to the drvdata.
@@ -595,7 +570,7 @@ static const struct file_operations hwicap_fops = {
595 .llseek = noop_llseek, 570 .llseek = noop_llseek,
596}; 571};
597 572
598static int hwicap_setup(struct device *dev, int id, 573static int __devinit hwicap_setup(struct device *dev, int id,
599 const struct resource *regs_res, 574 const struct resource *regs_res,
600 const struct hwicap_driver_config *config, 575 const struct hwicap_driver_config *config,
601 const struct config_registers *config_regs) 576 const struct config_registers *config_regs)
@@ -717,7 +692,7 @@ static struct hwicap_driver_config fifo_icap_config = {
717 .reset = fifo_icap_reset, 692 .reset = fifo_icap_reset,
718}; 693};
719 694
720static int hwicap_remove(struct device *dev) 695static int __devexit hwicap_remove(struct device *dev)
721{ 696{
722 struct hwicap_drvdata *drvdata; 697 struct hwicap_drvdata *drvdata;
723 698
@@ -740,7 +715,7 @@ static int hwicap_remove(struct device *dev)
740} 715}
741 716
742#ifdef CONFIG_OF 717#ifdef CONFIG_OF
743static int hwicap_of_probe(struct platform_device *op, 718static int __devinit hwicap_of_probe(struct platform_device *op,
744 const struct hwicap_driver_config *config) 719 const struct hwicap_driver_config *config)
745{ 720{
746 struct resource res; 721 struct resource res;
@@ -770,8 +745,6 @@ static int hwicap_of_probe(struct platform_device *op,
770 regs = &v4_config_registers; 745 regs = &v4_config_registers;
771 } else if (!strcmp(family, "virtex5")) { 746 } else if (!strcmp(family, "virtex5")) {
772 regs = &v5_config_registers; 747 regs = &v5_config_registers;
773 } else if (!strcmp(family, "virtex6")) {
774 regs = &v6_config_registers;
775 } 748 }
776 } 749 }
777 return hwicap_setup(&op->dev, id ? *id : -1, &res, config, 750 return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
@@ -785,8 +758,8 @@ static inline int hwicap_of_probe(struct platform_device *op,
785} 758}
786#endif /* CONFIG_OF */ 759#endif /* CONFIG_OF */
787 760
788static const struct of_device_id hwicap_of_match[]; 761static const struct of_device_id __devinitconst hwicap_of_match[];
789static int hwicap_drv_probe(struct platform_device *pdev) 762static int __devinit hwicap_drv_probe(struct platform_device *pdev)
790{ 763{
791 const struct of_device_id *match; 764 const struct of_device_id *match;
792 struct resource *res; 765 struct resource *res;
@@ -813,8 +786,6 @@ static int hwicap_drv_probe(struct platform_device *pdev)
813 regs = &v4_config_registers; 786 regs = &v4_config_registers;
814 } else if (!strcmp(family, "virtex5")) { 787 } else if (!strcmp(family, "virtex5")) {
815 regs = &v5_config_registers; 788 regs = &v5_config_registers;
816 } else if (!strcmp(family, "virtex6")) {
817 regs = &v6_config_registers;
818 } 789 }
819 } 790 }
820 791
@@ -822,14 +793,14 @@ static int hwicap_drv_probe(struct platform_device *pdev)
822 &buffer_icap_config, regs); 793 &buffer_icap_config, regs);
823} 794}
824 795
825static int hwicap_drv_remove(struct platform_device *pdev) 796static int __devexit hwicap_drv_remove(struct platform_device *pdev)
826{ 797{
827 return hwicap_remove(&pdev->dev); 798 return hwicap_remove(&pdev->dev);
828} 799}
829 800
830#ifdef CONFIG_OF 801#ifdef CONFIG_OF
831/* Match table for device tree binding */ 802/* Match table for device tree binding */
832static const struct of_device_id hwicap_of_match[] = { 803static const struct of_device_id __devinitconst hwicap_of_match[] = {
833 { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, 804 { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
834 { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config}, 805 { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
835 {}, 806 {},
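
Note: the xilinx_hwicap.c changes reinstate the older hotplug section annotations (__devinit on probe/setup, __devexit_p() around remove, __devinitconst on the OF match table) and drop the virtex6 register map. The sketch below shows how those annotations typically sit on a platform driver; the compatible string, names, and empty bodies are illustrative, not the hwicap driver's.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id __devinitconst example_of_match[] = {
	{ .compatible = "vendor,example-device" },	/* illustrative */
	{},
};

static int __devinit example_probe(struct platform_device *pdev)
{
	/* resource mapping and device setup would go here */
	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_platform_driver = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),	/* discarded when hotplug is off */
	.driver	= {
		.name		= "example-device",
		.owner		= THIS_MODULE,
		.of_match_table	= example_of_match,
	},
};

static int __init example_platform_init(void)
{
	return platform_driver_register(&example_platform_driver);
}

static void __exit example_platform_exit(void)
{
	platform_driver_unregister(&example_platform_driver);
}

module_init(example_platform_init);
module_exit(example_platform_exit);
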
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index d31ee23c9f1..8cca11981c5 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -86,7 +86,7 @@ struct hwicap_driver_config {
86}; 86};
87 87
 88/* Number of times to poll the done register */ 88/* Number of times to poll the done register */
89#define XHI_MAX_RETRIES 5000 89#define XHI_MAX_RETRIES 10
90 90
91/************ Constant Definitions *************/ 91/************ Constant Definitions *************/
92 92
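
Note: the header change cuts XHI_MAX_RETRIES from 5000 down to 10 polls of the done register. As a hedged illustration of the bounded-poll pattern that constant feeds, the register offset and bit below are made up, not the hwicap register layout:

#include <linux/errno.h>
#include <linux/io.h>
#include <asm/processor.h>

#define EXAMPLE_STATUS_OFFSET	0x24	/* hypothetical status register */
#define EXAMPLE_DONE_BIT	0x01	/* hypothetical done bit */

static int example_wait_done(void __iomem *base)
{
	int retries = XHI_MAX_RETRIES;

	while (!(readl(base + EXAMPLE_STATUS_OFFSET) & EXAMPLE_DONE_BIT)) {
		if (--retries == 0)
			return -ETIMEDOUT;	/* retry budget exhausted */
		cpu_relax();
	}
	return 0;
}
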