diff options
Diffstat (limited to 'drivers')
220 files changed, 5748 insertions, 3205 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 7edf6d913c13..765fd1c56cd6 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -688,14 +688,6 @@ void __init acpi_early_init(void) | |||
688 | if (acpi_disabled) | 688 | if (acpi_disabled) |
689 | return; | 689 | return; |
690 | 690 | ||
691 | /* | ||
692 | * ACPI CA initializes acpi_dbg_level to non-zero, which means | ||
693 | * we get debug output merely by turning on CONFIG_ACPI_DEBUG. | ||
694 | * Turn it off so we don't get output unless the user specifies | ||
695 | * acpi.debug_level. | ||
696 | */ | ||
697 | acpi_dbg_level = 0; | ||
698 | |||
699 | printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); | 691 | printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); |
700 | 692 | ||
701 | /* enable workarounds, unless strict ACPI spec. compliance */ | 693 | /* enable workarounds, unless strict ACPI spec. compliance */ |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 11acaee14d66..bf79d83bdfbb 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -384,6 +384,27 @@ acpi_pci_free_irq(struct acpi_prt_entry *entry, | |||
384 | return irq; | 384 | return irq; |
385 | } | 385 | } |
386 | 386 | ||
387 | #ifdef CONFIG_X86_IO_APIC | ||
388 | extern int noioapicquirk; | ||
389 | |||
390 | static int bridge_has_boot_interrupt_variant(struct pci_bus *bus) | ||
391 | { | ||
392 | struct pci_bus *bus_it; | ||
393 | |||
394 | for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { | ||
395 | if (!bus_it->self) | ||
396 | return 0; | ||
397 | |||
398 | printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor, | ||
399 | bus_it->self->device); | ||
400 | |||
401 | if (bus_it->self->irq_reroute_variant) | ||
402 | return bus_it->self->irq_reroute_variant; | ||
403 | } | ||
404 | return 0; | ||
405 | } | ||
406 | #endif /* CONFIG_X86_IO_APIC */ | ||
407 | |||
387 | /* | 408 | /* |
388 | * acpi_pci_irq_lookup | 409 | * acpi_pci_irq_lookup |
389 | * success: return IRQ >= 0 | 410 | * success: return IRQ >= 0 |
@@ -413,6 +434,41 @@ acpi_pci_irq_lookup(struct pci_bus *bus, | |||
413 | } | 434 | } |
414 | 435 | ||
415 | ret = func(entry, triggering, polarity, link); | 436 | ret = func(entry, triggering, polarity, link); |
437 | |||
438 | #ifdef CONFIG_X86_IO_APIC | ||
439 | /* | ||
440 | * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the | ||
441 | * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel | ||
442 | * does during interrupt handling). When this INTx generation cannot be | ||
443 | * disabled, we reroute these interrupts to their legacy equivalent to | ||
444 | * get rid of spurious interrupts. | ||
445 | */ | ||
446 | if (!noioapicquirk) { | ||
447 | switch (bridge_has_boot_interrupt_variant(bus)) { | ||
448 | case 0: | ||
449 | /* no rerouting necessary */ | ||
450 | break; | ||
451 | |||
452 | case INTEL_IRQ_REROUTE_VARIANT: | ||
453 | /* | ||
454 | * Remap according to INTx routing table in 6700PXH | ||
455 | * specs, intel order number 302628-002, section | ||
456 | * 2.15.2. Other chipsets (80332, ...) have the same | ||
457 | * mapping and are handled here as well. | ||
458 | */ | ||
459 | printk(KERN_INFO "pci irq %d -> rerouted to legacy " | ||
460 | "irq %d\n", ret, (ret % 4) + 16); | ||
461 | ret = (ret % 4) + 16; | ||
462 | break; | ||
463 | |||
464 | default: | ||
465 | printk(KERN_INFO "not rerouting irq %d to legacy irq: " | ||
466 | "unknown mapping\n", ret); | ||
467 | break; | ||
468 | } | ||
469 | } | ||
470 | #endif /* CONFIG_X86_IO_APIC */ | ||
471 | |||
416 | return ret; | 472 | return ret; |
417 | } | 473 | } |
418 | 474 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5f8d746a9b81..38aca048e951 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -374,15 +374,15 @@ static int tsc_halts_in_c(int state) | |||
374 | { | 374 | { |
375 | switch (boot_cpu_data.x86_vendor) { | 375 | switch (boot_cpu_data.x86_vendor) { |
376 | case X86_VENDOR_AMD: | 376 | case X86_VENDOR_AMD: |
377 | case X86_VENDOR_INTEL: | ||
377 | /* | 378 | /* |
378 | * AMD Fam10h TSC will tick in all | 379 | * AMD Fam10h TSC will tick in all |
379 | * C/P/S0/S1 states when this bit is set. | 380 | * C/P/S0/S1 states when this bit is set. |
380 | */ | 381 | */ |
381 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) | 382 | if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) |
382 | return 0; | 383 | return 0; |
384 | |||
383 | /*FALL THROUGH*/ | 385 | /*FALL THROUGH*/ |
384 | case X86_VENDOR_INTEL: | ||
385 | /* Several cases known where TSC halts in C2 too */ | ||
386 | default: | 386 | default: |
387 | return state > ACPI_STATE_C1; | 387 | return state > ACPI_STATE_C1; |
388 | } | 388 | } |
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c index 670551b95e56..17ed5ac840f7 100644 --- a/drivers/acpi/utilities/utglobal.c +++ b/drivers/acpi/utilities/utglobal.c | |||
@@ -64,7 +64,7 @@ u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT; | |||
64 | 64 | ||
65 | /* Debug switch - layer (component) mask */ | 65 | /* Debug switch - layer (component) mask */ |
66 | 66 | ||
67 | u32 acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS; | 67 | u32 acpi_dbg_layer = 0; |
68 | u32 acpi_gbl_nesting_level = 0; | 68 | u32 acpi_gbl_nesting_level = 0; |
69 | 69 | ||
70 | /* Debugger globals */ | 70 | /* Debugger globals */ |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5e2eb740df46..bc6695e3c848 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4050,17 +4050,70 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4050 | { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, | 4050 | { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, |
4051 | 4051 | ||
4052 | /* Seagate NCQ + FLUSH CACHE firmware bug */ | 4052 | /* Seagate NCQ + FLUSH CACHE firmware bug */ |
4053 | { "ST31500341AS", "9JU138", ATA_HORKAGE_NONCQ | | 4053 | { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | |
4054 | ATA_HORKAGE_FIRMWARE_WARN }, | 4054 | ATA_HORKAGE_FIRMWARE_WARN }, |
4055 | { "ST31000333AS", "9FZ136", ATA_HORKAGE_NONCQ | | 4055 | { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | |
4056 | ATA_HORKAGE_FIRMWARE_WARN }, | 4056 | ATA_HORKAGE_FIRMWARE_WARN }, |
4057 | { "ST3640623AS", "9FZ164", ATA_HORKAGE_NONCQ | | 4057 | { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | |
4058 | ATA_HORKAGE_FIRMWARE_WARN }, | 4058 | ATA_HORKAGE_FIRMWARE_WARN }, |
4059 | { "ST3640323AS", "9FZ134", ATA_HORKAGE_NONCQ | | 4059 | { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | |
4060 | ATA_HORKAGE_FIRMWARE_WARN }, | 4060 | ATA_HORKAGE_FIRMWARE_WARN }, |
4061 | { "ST3320813AS", "9FZ182", ATA_HORKAGE_NONCQ | | 4061 | { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | |
4062 | ATA_HORKAGE_FIRMWARE_WARN }, | 4062 | ATA_HORKAGE_FIRMWARE_WARN }, |
4063 | { "ST3320613AS", "9FZ162", ATA_HORKAGE_NONCQ | | 4063 | |
4064 | { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | | ||
4065 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4066 | { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | | ||
4067 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4068 | { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | | ||
4069 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4070 | { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | | ||
4071 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4072 | { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | | ||
4073 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4074 | |||
4075 | { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | | ||
4076 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4077 | { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ | | ||
4078 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4079 | { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | | ||
4080 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4081 | { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | | ||
4082 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4083 | { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | | ||
4084 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4085 | |||
4086 | { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | | ||
4087 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4088 | { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | | ||
4089 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4090 | { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | | ||
4091 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4092 | { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | | ||
4093 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4094 | { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | | ||
4095 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4096 | |||
4097 | { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | | ||
4098 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4099 | { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | | ||
4100 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4101 | { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | | ||
4102 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4103 | { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | | ||
4104 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4105 | { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | | ||
4106 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4107 | |||
4108 | { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | | ||
4109 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4110 | { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | | ||
4111 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4112 | { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | | ||
4113 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4114 | { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ | | ||
4115 | ATA_HORKAGE_FIRMWARE_WARN }, | ||
4116 | { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | | ||
4064 | ATA_HORKAGE_FIRMWARE_WARN }, | 4117 | ATA_HORKAGE_FIRMWARE_WARN }, |
4065 | 4118 | ||
4066 | /* Blacklist entries taken from Silicon Image 3124/3132 | 4119 | /* Blacklist entries taken from Silicon Image 3124/3132 |
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index a098ba8eaab6..e0c4f05d7d57 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c | |||
@@ -183,7 +183,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) | |||
183 | mask &= ~(0xF8 << ATA_SHIFT_UDMA); | 183 | mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) | 184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) |
185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); | 185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); |
186 | } | 186 | } else if (adev->class == ATA_DEV_ATAPI) |
187 | mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | ||
188 | |||
187 | return ata_bmdma_mode_filter(adev, mask); | 189 | return ata_bmdma_mode_filter(adev, mask); |
188 | } | 190 | } |
189 | 191 | ||
@@ -211,11 +213,15 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed) | |||
211 | 213 | ||
212 | static int hpt36x_cable_detect(struct ata_port *ap) | 214 | static int hpt36x_cable_detect(struct ata_port *ap) |
213 | { | 215 | { |
214 | u8 ata66; | ||
215 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 216 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
217 | u8 ata66; | ||
216 | 218 | ||
219 | /* | ||
220 | * Each channel of pata_hpt366 occupies separate PCI function | ||
221 | * as the primary channel and bit1 indicates the cable type. | ||
222 | */ | ||
217 | pci_read_config_byte(pdev, 0x5A, &ata66); | 223 | pci_read_config_byte(pdev, 0x5A, &ata66); |
218 | if (ata66 & (1 << ap->port_no)) | 224 | if (ata66 & 2) |
219 | return ATA_CBL_PATA40; | 225 | return ATA_CBL_PATA40; |
220 | return ATA_CBL_PATA80; | 226 | return ATA_CBL_PATA80; |
221 | } | 227 | } |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9364dc554257..9f7c543cc04b 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -1693,6 +1693,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
1693 | for (i = 0; i <= h->highest_lun; i++) { | 1693 | for (i = 0; i <= h->highest_lun; i++) { |
1694 | int j; | 1694 | int j; |
1695 | drv_found = 0; | 1695 | drv_found = 0; |
1696 | |||
1697 | /* skip holes in the array from already deleted drives */ | ||
1698 | if (h->drv[i].raid_level == -1) | ||
1699 | continue; | ||
1700 | |||
1696 | for (j = 0; j < num_luns; j++) { | 1701 | for (j = 0; j < num_luns; j++) { |
1697 | memcpy(&lunid, &ld_buff->LUN[j][0], 4); | 1702 | memcpy(&lunid, &ld_buff->LUN[j][0], 4); |
1698 | lunid = le32_to_cpu(lunid); | 1703 | lunid = le32_to_cpu(lunid); |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5c4ee70d5cf3..fb06ed659212 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -936,8 +936,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) | |||
936 | { | 936 | { |
937 | int err; | 937 | int err; |
938 | struct loop_func_table *xfer; | 938 | struct loop_func_table *xfer; |
939 | uid_t uid = current_uid(); | ||
939 | 940 | ||
940 | if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid && | 941 | if (lo->lo_encrypt_key_size && |
942 | lo->lo_key_owner != uid && | ||
941 | !capable(CAP_SYS_ADMIN)) | 943 | !capable(CAP_SYS_ADMIN)) |
942 | return -EPERM; | 944 | return -EPERM; |
943 | if (lo->lo_state != Lo_bound) | 945 | if (lo->lo_state != Lo_bound) |
@@ -992,7 +994,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) | |||
992 | if (info->lo_encrypt_key_size) { | 994 | if (info->lo_encrypt_key_size) { |
993 | memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, | 995 | memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, |
994 | info->lo_encrypt_key_size); | 996 | info->lo_encrypt_key_size); |
995 | lo->lo_key_owner = current->uid; | 997 | lo->lo_key_owner = uid; |
996 | } | 998 | } |
997 | 999 | ||
998 | return 0; | 1000 | return 0; |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 43d6ba83a191..8783457b93d3 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -622,6 +622,16 @@ config HVC_BEAT | |||
622 | help | 622 | help |
623 | Toshiba's Cell Reference Set Beat Console device driver | 623 | Toshiba's Cell Reference Set Beat Console device driver |
624 | 624 | ||
625 | config HVC_IUCV | ||
626 | bool "z/VM IUCV Hypervisor console support (VM only)" | ||
627 | depends on S390 | ||
628 | select HVC_DRIVER | ||
629 | select IUCV | ||
630 | default y | ||
631 | help | ||
632 | This driver provides a Hypervisor console (HVC) back-end to access | ||
633 | a Linux (console) terminal via a z/VM IUCV communication path. | ||
634 | |||
625 | config HVC_XEN | 635 | config HVC_XEN |
626 | bool "Xen Hypervisor Console support" | 636 | bool "Xen Hypervisor Console support" |
627 | depends on XEN | 637 | depends on XEN |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 438f71317c5c..36151bae0d72 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o | |||
50 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o | 50 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o |
51 | obj-$(CONFIG_HVC_IRQ) += hvc_irq.o | 51 | obj-$(CONFIG_HVC_IRQ) += hvc_irq.o |
52 | obj-$(CONFIG_HVC_XEN) += hvc_xen.o | 52 | obj-$(CONFIG_HVC_XEN) += hvc_xen.o |
53 | obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o | ||
53 | obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o | 54 | obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o |
54 | obj-$(CONFIG_RAW_DRIVER) += raw.o | 55 | obj-$(CONFIG_RAW_DRIVER) += raw.o |
55 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o | 56 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o |
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c new file mode 100644 index 000000000000..5ea7d7713fca --- /dev/null +++ b/drivers/char/hvc_iucv.c | |||
@@ -0,0 +1,850 @@ | |||
1 | /* | ||
2 | * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC) | ||
3 | * | ||
4 | * This back-end for HVC provides terminal access via | ||
5 | * z/VM IUCV communication paths. | ||
6 | * | ||
7 | * Copyright IBM Corp. 2008. | ||
8 | * | ||
9 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | ||
10 | */ | ||
11 | #define KMSG_COMPONENT "hvc_iucv" | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <asm/ebcdic.h> | ||
15 | #include <linux/mempool.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/tty.h> | ||
18 | #include <net/iucv/iucv.h> | ||
19 | |||
20 | #include "hvc_console.h" | ||
21 | |||
22 | |||
23 | /* HVC backend for z/VM IUCV */ | ||
24 | #define HVC_IUCV_MAGIC 0xc9e4c3e5 | ||
25 | #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS | ||
26 | #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) | ||
27 | |||
28 | /* IUCV TTY message */ | ||
29 | #define MSG_VERSION 0x02 /* Message version */ | ||
30 | #define MSG_TYPE_ERROR 0x01 /* Error message */ | ||
31 | #define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */ | ||
32 | #define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */ | ||
33 | #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ | ||
34 | #define MSG_TYPE_DATA 0x10 /* Terminal data */ | ||
35 | |||
36 | #define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data)) | ||
37 | struct iucv_tty_msg { | ||
38 | u8 version; /* Message version */ | ||
39 | u8 type; /* Message type */ | ||
40 | #define MSG_MAX_DATALEN (~(u16)0) | ||
41 | u16 datalen; /* Payload length */ | ||
42 | u8 data[]; /* Payload buffer */ | ||
43 | } __attribute__((packed)); | ||
44 | |||
45 | enum iucv_state_t { | ||
46 | IUCV_DISCONN = 0, | ||
47 | IUCV_CONNECTED = 1, | ||
48 | IUCV_SEVERED = 2, | ||
49 | }; | ||
50 | |||
51 | enum tty_state_t { | ||
52 | TTY_CLOSED = 0, | ||
53 | TTY_OPENED = 1, | ||
54 | }; | ||
55 | |||
56 | struct hvc_iucv_private { | ||
57 | struct hvc_struct *hvc; /* HVC console struct reference */ | ||
58 | u8 srv_name[8]; /* IUCV service name (ebcdic) */ | ||
59 | enum iucv_state_t iucv_state; /* IUCV connection status */ | ||
60 | enum tty_state_t tty_state; /* TTY status */ | ||
61 | struct iucv_path *path; /* IUCV path pointer */ | ||
62 | spinlock_t lock; /* hvc_iucv_private lock */ | ||
63 | struct list_head tty_outqueue; /* outgoing IUCV messages */ | ||
64 | struct list_head tty_inqueue; /* incoming IUCV messages */ | ||
65 | }; | ||
66 | |||
67 | struct iucv_tty_buffer { | ||
68 | struct list_head list; /* list pointer */ | ||
69 | struct iucv_message msg; /* store an incoming IUCV message */ | ||
70 | size_t offset; /* data buffer offset */ | ||
71 | struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ | ||
72 | }; | ||
73 | |||
74 | /* IUCV callback handler */ | ||
75 | static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]); | ||
76 | static void hvc_iucv_path_severed(struct iucv_path *, u8[16]); | ||
77 | static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *); | ||
78 | static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); | ||
79 | |||
80 | |||
81 | /* Kernel module parameters */ | ||
82 | static unsigned long hvc_iucv_devices; | ||
83 | |||
84 | /* Array of allocated hvc iucv tty lines... */ | ||
85 | static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; | ||
86 | |||
87 | /* Kmem cache and mempool for iucv_tty_buffer elements */ | ||
88 | static struct kmem_cache *hvc_iucv_buffer_cache; | ||
89 | static mempool_t *hvc_iucv_mempool; | ||
90 | |||
91 | /* IUCV handler callback functions */ | ||
92 | static struct iucv_handler hvc_iucv_handler = { | ||
93 | .path_pending = hvc_iucv_path_pending, | ||
94 | .path_severed = hvc_iucv_path_severed, | ||
95 | .message_complete = hvc_iucv_msg_complete, | ||
96 | .message_pending = hvc_iucv_msg_pending, | ||
97 | }; | ||
98 | |||
99 | |||
100 | /** | ||
101 | * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance. | ||
102 | * @num: The HVC virtual terminal number (vtermno) | ||
103 | * | ||
104 | * This function returns the struct hvc_iucv_private instance that corresponds | ||
105 | * to the HVC virtual terminal number specified as parameter @num. | ||
106 | */ | ||
107 | struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) | ||
108 | { | ||
109 | if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices)) | ||
110 | return NULL; | ||
111 | return hvc_iucv_table[num - HVC_IUCV_MAGIC]; | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element. | ||
116 | * @size: Size of the internal buffer used to store data. | ||
117 | * @flags: Memory allocation flags passed to mempool. | ||
118 | * | ||
119 | * This function allocates a new struct iucv_tty_buffer element and, optionally, | ||
120 | * allocates an internal data buffer with the specified size @size. | ||
121 | * Note: The total message size arises from the internal buffer size and the | ||
122 | * members of the iucv_tty_msg structure. | ||
123 | * | ||
124 | * The function returns NULL if memory allocation has failed. | ||
125 | */ | ||
126 | static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) | ||
127 | { | ||
128 | struct iucv_tty_buffer *bufp; | ||
129 | |||
130 | bufp = mempool_alloc(hvc_iucv_mempool, flags); | ||
131 | if (!bufp) | ||
132 | return NULL; | ||
133 | memset(bufp, 0, sizeof(struct iucv_tty_buffer)); | ||
134 | |||
135 | if (size > 0) { | ||
136 | bufp->msg.length = MSG_SIZE(size); | ||
137 | bufp->mbuf = kmalloc(bufp->msg.length, flags); | ||
138 | if (!bufp->mbuf) { | ||
139 | mempool_free(bufp, hvc_iucv_mempool); | ||
140 | return NULL; | ||
141 | } | ||
142 | bufp->mbuf->version = MSG_VERSION; | ||
143 | bufp->mbuf->type = MSG_TYPE_DATA; | ||
144 | bufp->mbuf->datalen = (u16) size; | ||
145 | } | ||
146 | return bufp; | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. | ||
151 | * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. | ||
152 | * | ||
153 | * The destroy_tty_buffer() function frees the internal data buffer and returns | ||
154 | * the struct iucv_tty_buffer element back to the mempool for freeing. | ||
155 | */ | ||
156 | static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) | ||
157 | { | ||
158 | kfree(bufp->mbuf); | ||
159 | mempool_free(bufp, hvc_iucv_mempool); | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. | ||
164 | * @list: List head pointer to a list containing struct iucv_tty_buffer | ||
165 | * elements. | ||
166 | * | ||
167 | * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the | ||
168 | * list @list. | ||
169 | */ | ||
170 | static void destroy_tty_buffer_list(struct list_head *list) | ||
171 | { | ||
172 | struct iucv_tty_buffer *ent, *next; | ||
173 | |||
174 | list_for_each_entry_safe(ent, next, list, list) { | ||
175 | list_del(&ent->list); | ||
176 | destroy_tty_buffer(ent); | ||
177 | } | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer. | ||
182 | * @priv: Pointer to hvc_iucv_private structure. | ||
183 | * @buf: HVC console buffer for writing received terminal data. | ||
184 | * @count: HVC console buffer size. | ||
185 | * @has_more_data: Pointer to an int variable. | ||
186 | * | ||
187 | * The function picks up pending messages from the input queue and receives | ||
188 | * the message data that is then written to the specified buffer @buf. | ||
189 | * If the buffer size @count is less than the data message size, then the | ||
190 | * message is kept on the input queue and @has_more_data is set to 1. | ||
191 | * If the message data has been entirely written, the message is removed from | ||
192 | * the input queue. | ||
193 | * | ||
194 | * The function returns the number of bytes written to the terminal, zero if | ||
195 | * there are no pending data messages available or if there is no established | ||
196 | * IUCV path. | ||
197 | * If the IUCV path has been severed, then -EPIPE is returned to cause a | ||
198 | * hang up (that is issued by the HVC console layer). | ||
199 | */ | ||
200 | static int hvc_iucv_write(struct hvc_iucv_private *priv, | ||
201 | char *buf, int count, int *has_more_data) | ||
202 | { | ||
203 | struct iucv_tty_buffer *rb; | ||
204 | int written; | ||
205 | int rc; | ||
206 | |||
207 | /* Immediately return if there is no IUCV connection */ | ||
208 | if (priv->iucv_state == IUCV_DISCONN) | ||
209 | return 0; | ||
210 | |||
211 | /* If the IUCV path has been severed, return -EPIPE to inform the | ||
212 | * hvc console layer to hang up the tty device. */ | ||
213 | if (priv->iucv_state == IUCV_SEVERED) | ||
214 | return -EPIPE; | ||
215 | |||
216 | /* check if there are pending messages */ | ||
217 | if (list_empty(&priv->tty_inqueue)) | ||
218 | return 0; | ||
219 | |||
220 | /* receive an IUCV message and flip data to the tty (ldisc) */ | ||
221 | rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); | ||
222 | |||
223 | written = 0; | ||
224 | if (!rb->mbuf) { /* message not yet received ... */ | ||
225 | /* allocate mem to store msg data; if no memory is available | ||
226 | * then leave the buffer on the list and re-try later */ | ||
227 | rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC); | ||
228 | if (!rb->mbuf) | ||
229 | return -ENOMEM; | ||
230 | |||
231 | rc = __iucv_message_receive(priv->path, &rb->msg, 0, | ||
232 | rb->mbuf, rb->msg.length, NULL); | ||
233 | switch (rc) { | ||
234 | case 0: /* Successful */ | ||
235 | break; | ||
236 | case 2: /* No message found */ | ||
237 | case 9: /* Message purged */ | ||
238 | break; | ||
239 | default: | ||
240 | written = -EIO; | ||
241 | } | ||
242 | /* remove buffer if an error has occurred or received data | ||
243 | * is not correct */ | ||
244 | if (rc || (rb->mbuf->version != MSG_VERSION) || | ||
245 | (rb->msg.length != MSG_SIZE(rb->mbuf->datalen))) | ||
246 | goto out_remove_buffer; | ||
247 | } | ||
248 | |||
249 | switch (rb->mbuf->type) { | ||
250 | case MSG_TYPE_DATA: | ||
251 | written = min_t(int, rb->mbuf->datalen - rb->offset, count); | ||
252 | memcpy(buf, rb->mbuf->data + rb->offset, written); | ||
253 | if (written < (rb->mbuf->datalen - rb->offset)) { | ||
254 | rb->offset += written; | ||
255 | *has_more_data = 1; | ||
256 | goto out_written; | ||
257 | } | ||
258 | break; | ||
259 | |||
260 | case MSG_TYPE_WINSIZE: | ||
261 | if (rb->mbuf->datalen != sizeof(struct winsize)) | ||
262 | break; | ||
263 | hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data)); | ||
264 | break; | ||
265 | |||
266 | case MSG_TYPE_ERROR: /* ignored ... */ | ||
267 | case MSG_TYPE_TERMENV: /* ignored ... */ | ||
268 | case MSG_TYPE_TERMIOS: /* ignored ... */ | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | out_remove_buffer: | ||
273 | list_del(&rb->list); | ||
274 | destroy_tty_buffer(rb); | ||
275 | *has_more_data = !list_empty(&priv->tty_inqueue); | ||
276 | |||
277 | out_written: | ||
278 | return written; | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * hvc_iucv_get_chars() - HVC get_chars operation. | ||
283 | * @vtermno: HVC virtual terminal number. | ||
284 | * @buf: Pointer to a buffer to store data | ||
285 | * @count: Size of buffer available for writing | ||
286 | * | ||
287 | * The hvc_console thread calls this method to read characters from | ||
288 | * the terminal backend. If an IUCV communication path has been established, | ||
289 | * pending IUCV messages are received and data is copied into buffer @buf | ||
290 | * up to @count bytes. | ||
291 | * | ||
292 | * Locking: The routine gets called under an irqsave() spinlock; and | ||
293 | * the routine locks the struct hvc_iucv_private->lock to call | ||
294 | * helper functions. | ||
295 | */ | ||
296 | static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count) | ||
297 | { | ||
298 | struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); | ||
299 | int written; | ||
300 | int has_more_data; | ||
301 | |||
302 | if (count <= 0) | ||
303 | return 0; | ||
304 | |||
305 | if (!priv) | ||
306 | return -ENODEV; | ||
307 | |||
308 | spin_lock(&priv->lock); | ||
309 | has_more_data = 0; | ||
310 | written = hvc_iucv_write(priv, buf, count, &has_more_data); | ||
311 | spin_unlock(&priv->lock); | ||
312 | |||
313 | /* if there are still messages on the queue... schedule another run */ | ||
314 | if (has_more_data) | ||
315 | hvc_kick(); | ||
316 | |||
317 | return written; | ||
318 | } | ||
319 | |||
320 | /** | ||
321 | * hvc_iucv_send() - Send an IUCV message containing terminal data. | ||
322 | * @priv: Pointer to struct hvc_iucv_private instance. | ||
323 | * @buf: Buffer containing data to send. | ||
324 | * @size: Size of buffer and amount of data to send. | ||
325 | * | ||
326 | * If an IUCV communication path is established, the function copies the buffer | ||
327 | * data to a newly allocated struct iucv_tty_buffer element, sends the data and | ||
328 | * puts the element to the outqueue. | ||
329 | * | ||
330 | * If there is no IUCV communication path established, the function returns 0. | ||
331 | * If an existing IUCV communication path has been severed, the function returns | ||
332 | * -EPIPE (can be passed to HVC layer to cause a tty hangup). | ||
333 | */ | ||
334 | static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf, | ||
335 | int count) | ||
336 | { | ||
337 | struct iucv_tty_buffer *sb; | ||
338 | int rc; | ||
339 | u16 len; | ||
340 | |||
341 | if (priv->iucv_state == IUCV_SEVERED) | ||
342 | return -EPIPE; | ||
343 | |||
344 | if (priv->iucv_state == IUCV_DISCONN) | ||
345 | return 0; | ||
346 | |||
347 | len = min_t(u16, MSG_MAX_DATALEN, count); | ||
348 | |||
349 | /* allocate internal buffer to store msg data and also compute total | ||
350 | * message length */ | ||
351 | sb = alloc_tty_buffer(len, GFP_ATOMIC); | ||
352 | if (!sb) | ||
353 | return -ENOMEM; | ||
354 | |||
355 | sb->mbuf->datalen = len; | ||
356 | memcpy(sb->mbuf->data, buf, len); | ||
357 | |||
358 | list_add_tail(&sb->list, &priv->tty_outqueue); | ||
359 | |||
360 | rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, | ||
361 | (void *) sb->mbuf, sb->msg.length); | ||
362 | if (rc) { | ||
363 | list_del(&sb->list); | ||
364 | destroy_tty_buffer(sb); | ||
365 | len = 0; | ||
366 | } | ||
367 | |||
368 | return len; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * hvc_iucv_put_chars() - HVC put_chars operation. | ||
373 | * @vtermno: HVC virtual terminal number. | ||
374 | * @buf: Pointer to a buffer to read data from | ||
375 | * @count: Size of buffer available for reading | ||
376 | * | ||
377 | * The hvc_console thread calls this method to write characters | ||
378 | * to the terminal backend. | ||
379 | * The function calls hvc_iucv_send() under the lock of the | ||
380 | * struct hvc_iucv_private instance that corresponds to the tty @vtermno. | ||
381 | * | ||
382 | * Locking: The method gets called under an irqsave() spinlock; and | ||
383 | * locks struct hvc_iucv_private->lock. | ||
384 | */ | ||
385 | static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) | ||
386 | { | ||
387 | struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); | ||
388 | int sent; | ||
389 | |||
390 | if (count <= 0) | ||
391 | return 0; | ||
392 | |||
393 | if (!priv) | ||
394 | return -ENODEV; | ||
395 | |||
396 | spin_lock(&priv->lock); | ||
397 | sent = hvc_iucv_send(priv, buf, count); | ||
398 | spin_unlock(&priv->lock); | ||
399 | |||
400 | return sent; | ||
401 | } | ||
402 | |||
403 | /** | ||
404 | * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time. | ||
405 | * @hp: Pointer to the HVC device (struct hvc_struct) | ||
406 | * @id: Additional data (originally passed to hvc_alloc): the index of an struct | ||
407 | * hvc_iucv_private instance. | ||
408 | * | ||
409 | * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private | ||
410 | * instance that is derived from @id. Always returns 0. | ||
411 | * | ||
412 | * Locking: struct hvc_iucv_private->lock, spin_lock_bh | ||
413 | */ | ||
414 | static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id) | ||
415 | { | ||
416 | struct hvc_iucv_private *priv; | ||
417 | |||
418 | priv = hvc_iucv_get_private(id); | ||
419 | if (!priv) | ||
420 | return 0; | ||
421 | |||
422 | spin_lock_bh(&priv->lock); | ||
423 | priv->tty_state = TTY_OPENED; | ||
424 | spin_unlock_bh(&priv->lock); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed. | ||
431 | * @priv: Pointer to the struct hvc_iucv_private instance. | ||
432 | * | ||
433 | * The functions severs the established IUCV communication path (if any), and | ||
434 | * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally, | ||
435 | * the functions resets the states to TTY_CLOSED and IUCV_DISCONN. | ||
436 | */ | ||
437 | static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) | ||
438 | { | ||
439 | destroy_tty_buffer_list(&priv->tty_outqueue); | ||
440 | destroy_tty_buffer_list(&priv->tty_inqueue); | ||
441 | |||
442 | priv->tty_state = TTY_CLOSED; | ||
443 | priv->iucv_state = IUCV_DISCONN; | ||
444 | } | ||
445 | |||
/**
 * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of an struct
 *	hvc_iucv_private instance.
 *
 * This routine notifies the HVC backend that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 *
 * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
 * an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 *
 * If the tty has been opened (e.g. getty) and an established IUCV path has been
 * severed (we caused the tty hangup in that case), then the function invokes
 * hvc_iucv_cleanup() to clean up.
 *
 * Locking: struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_servered callback [IUCV_SEVERED]), then we have to
	 *	 finally clean up the tty backend structure and set state to
	 *	 TTY_CLOSED.
	 *
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}
490 | |||
/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of an struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC backend that the last tty device file
 * descriptor has been closed.
 * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private
 * instance, and severs/frees any established IUCV path afterwards.
 *
 * Locking: struct hvc_iucv_private->lock (the path itself is severed outside
 * the lock, see the note below)
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path	*path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;		/* detach so callbacks see no path */
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
526 | |||
/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	Originator z/VM system identifier
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine whether the pending
 * path belongs to a terminal managed by this HVC backend (match on the
 * first 8 bytes against each srv_name).
 * If the check is successful, then an additional check is done to ensure
 * that a terminal cannot be accessed multiple times (only one connection
 * to a terminal is allowed). In that particular case, the pending path is
 * severed. If it is the first connection, the pending path is accepted and
 * associated to the struct hvc_iucv_private. The iucv state is updated to
 * reflect that a communication path has been established.
 *
 * Returns 0 if the path belongs to a terminal managed by this HVC backend;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 * Note: 0 is also returned when the path had to be severed (the path was
 * still "handled" by this backend).
 *
 * Locking: struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	int i, rc;

	/* find the terminal whose EBCDIC service name matches ipuser[0..7] */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		/* accept failed: sever and free; still report path handled */
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}
595 | |||
/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function also severs the path (as required by the IUCV protocol) and
 * sets the iucv state to IUCV_SEVERED for the associated struct
 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
 *
 * If the tty portion of the HVC is closed then clean up the outqueue in
 * addition.
 *
 * Locking: struct hvc_iucv_private->lock (the path is severed outside the
 * lock due to lock ordering)
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	spin_lock(&priv->lock);
	priv->iucv_state = IUCV_SEVERED;

	/* NOTE: If the tty has not yet been opened by a getty program
	 *	 (e.g. to see console messages), then cleanup the
	 *	 hvc_iucv_private structure to allow re-connects.
	 *
	 *	 If the tty has been opened, the get_chars() callback returns
	 *	 -EPIPE to signal the hvc console layer to hang up the tty. */
	priv->path = NULL;
	if (priv->tty_state == TTY_CLOSED)
		hvc_iucv_cleanup(priv);
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	iucv_path_sever(path, ipuser);
	iucv_path_free(path);
}
633 | |||
/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function stores an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * However, if the tty has not yet been opened, the message is rejected.
 * The message is also rejected if no buffer element can be allocated
 * (GFP_ATOMIC allocation failure).
 *
 * Locking: struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate an empty buffer element; the message data itself is
	 * received later from hvc_iucv_write() */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc console thread */

unlock_return:
	spin_unlock(&priv->lock);
}
674 | |||
675 | /** | ||
676 | * hvc_iucv_msg_complete() - IUCV handler to process message completion | ||
677 | * @path: Pending path (struct iucv_path) | ||
678 | * @msg: Pointer to the IUCV message | ||
679 | * | ||
680 | * The function is called upon completion of message delivery and the | ||
681 | * message is removed from the outqueue. Additional delivery information | ||
682 | * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and | ||
683 | * purged messages (0x010000 (IPADPGNR)). | ||
684 | * | ||
685 | * Locking: struct hvc_iucv_private->lock | ||
686 | */ | ||
687 | static void hvc_iucv_msg_complete(struct iucv_path *path, | ||
688 | struct iucv_message *msg) | ||
689 | { | ||
690 | struct hvc_iucv_private *priv = path->private; | ||
691 | struct iucv_tty_buffer *ent, *next; | ||
692 | LIST_HEAD(list_remove); | ||
693 | |||
694 | spin_lock(&priv->lock); | ||
695 | list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list) | ||
696 | if (ent->msg.id == msg->id) { | ||
697 | list_move(&ent->list, &list_remove); | ||
698 | break; | ||
699 | } | ||
700 | spin_unlock(&priv->lock); | ||
701 | destroy_tty_buffer_list(&list_remove); | ||
702 | } | ||
703 | |||
704 | |||
/* HVC operations: callback table handed to hvc_alloc()/hvc_instantiate() */
static struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};
713 | |||
714 | /** | ||
715 | * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance | ||
716 | * @id: hvc_iucv_table index | ||
717 | * | ||
718 | * This function allocates a new hvc_iucv_private struct and put the | ||
719 | * instance into hvc_iucv_table at index @id. | ||
720 | * Returns 0 on success; otherwise non-zero. | ||
721 | */ | ||
722 | static int __init hvc_iucv_alloc(int id) | ||
723 | { | ||
724 | struct hvc_iucv_private *priv; | ||
725 | char name[9]; | ||
726 | int rc; | ||
727 | |||
728 | priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL); | ||
729 | if (!priv) | ||
730 | return -ENOMEM; | ||
731 | |||
732 | spin_lock_init(&priv->lock); | ||
733 | INIT_LIST_HEAD(&priv->tty_outqueue); | ||
734 | INIT_LIST_HEAD(&priv->tty_inqueue); | ||
735 | |||
736 | /* Finally allocate hvc */ | ||
737 | priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, | ||
738 | HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE); | ||
739 | if (IS_ERR(priv->hvc)) { | ||
740 | rc = PTR_ERR(priv->hvc); | ||
741 | kfree(priv); | ||
742 | return rc; | ||
743 | } | ||
744 | |||
745 | /* setup iucv related information */ | ||
746 | snprintf(name, 9, "ihvc%-4d", id); | ||
747 | memcpy(priv->srv_name, name, 8); | ||
748 | ASCEBC(priv->srv_name, 8); | ||
749 | |||
750 | hvc_iucv_table[id] = priv; | ||
751 | return 0; | ||
752 | } | ||
753 | |||
754 | /** | ||
755 | * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV | ||
756 | */ | ||
757 | static int __init hvc_iucv_init(void) | ||
758 | { | ||
759 | int rc, i; | ||
760 | |||
761 | if (!MACHINE_IS_VM) { | ||
762 | pr_warning("The z/VM IUCV Hypervisor console cannot be " | ||
763 | "used without z/VM.\n"); | ||
764 | return -ENODEV; | ||
765 | } | ||
766 | |||
767 | if (!hvc_iucv_devices) | ||
768 | return -ENODEV; | ||
769 | |||
770 | if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) | ||
771 | return -EINVAL; | ||
772 | |||
773 | hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT, | ||
774 | sizeof(struct iucv_tty_buffer), | ||
775 | 0, 0, NULL); | ||
776 | if (!hvc_iucv_buffer_cache) { | ||
777 | pr_err("Not enough memory for driver initialization " | ||
778 | "(rs=%d).\n", 1); | ||
779 | return -ENOMEM; | ||
780 | } | ||
781 | |||
782 | hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, | ||
783 | hvc_iucv_buffer_cache); | ||
784 | if (!hvc_iucv_mempool) { | ||
785 | pr_err("Not enough memory for driver initialization " | ||
786 | "(rs=%d).\n", 2); | ||
787 | kmem_cache_destroy(hvc_iucv_buffer_cache); | ||
788 | return -ENOMEM; | ||
789 | } | ||
790 | |||
791 | /* allocate hvc_iucv_private structs */ | ||
792 | for (i = 0; i < hvc_iucv_devices; i++) { | ||
793 | rc = hvc_iucv_alloc(i); | ||
794 | if (rc) { | ||
795 | pr_err("Could not create new z/VM IUCV HVC backend " | ||
796 | "rc=%d.\n", rc); | ||
797 | goto out_error_hvc; | ||
798 | } | ||
799 | } | ||
800 | |||
801 | /* register IUCV callback handler */ | ||
802 | rc = iucv_register(&hvc_iucv_handler, 0); | ||
803 | if (rc) { | ||
804 | pr_err("Could not register iucv handler (rc=%d).\n", rc); | ||
805 | goto out_error_iucv; | ||
806 | } | ||
807 | |||
808 | return 0; | ||
809 | |||
810 | out_error_iucv: | ||
811 | iucv_unregister(&hvc_iucv_handler, 0); | ||
812 | out_error_hvc: | ||
813 | for (i = 0; i < hvc_iucv_devices; i++) | ||
814 | if (hvc_iucv_table[i]) { | ||
815 | if (hvc_iucv_table[i]->hvc) | ||
816 | hvc_remove(hvc_iucv_table[i]->hvc); | ||
817 | kfree(hvc_iucv_table[i]); | ||
818 | } | ||
819 | mempool_destroy(hvc_iucv_mempool); | ||
820 | kmem_cache_destroy(hvc_iucv_buffer_cache); | ||
821 | return rc; | ||
822 | } | ||
823 | |||
/**
 * hvc_iucv_console_init() - Early console initialization
 *
 * Registers terminal 0 as a possible console device via hvc_instantiate().
 * Returns -ENODEV when not running under z/VM or when no hvc_iucv devices
 * have been configured on the kernel command line.
 */
static int __init hvc_iucv_console_init(void)
{
	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;
	return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
}
833 | |||
834 | /** | ||
835 | * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter | ||
836 | * @val: Parameter value (numeric) | ||
837 | */ | ||
838 | static int __init hvc_iucv_config(char *val) | ||
839 | { | ||
840 | return strict_strtoul(val, 10, &hvc_iucv_devices); | ||
841 | } | ||
842 | |||
843 | |||
/* Driver registration: module init, early console hook, and the
 * "hvc_iucv=<n>" boot parameter for the number of terminal devices. */
module_init(hvc_iucv_init);
console_initcall(hvc_iucv_console_init);
__setup("hvc_iucv=", hvc_iucv_config);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index ce0d9da52a8a..94966edfb44d 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -274,6 +274,22 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = { | |||
274 | .enable_mask = SYSRQ_ENABLE_DUMP, | 274 | .enable_mask = SYSRQ_ENABLE_DUMP, |
275 | }; | 275 | }; |
276 | 276 | ||
277 | #ifdef CONFIG_TRACING | ||
278 | #include <linux/ftrace.h> | ||
279 | |||
280 | static void sysrq_ftrace_dump(int key, struct tty_struct *tty) | ||
281 | { | ||
282 | ftrace_dump(); | ||
283 | } | ||
284 | static struct sysrq_key_op sysrq_ftrace_dump_op = { | ||
285 | .handler = sysrq_ftrace_dump, | ||
286 | .help_msg = "dumpZ-ftrace-buffer", | ||
287 | .action_msg = "Dump ftrace buffer", | ||
288 | .enable_mask = SYSRQ_ENABLE_DUMP, | ||
289 | }; | ||
290 | #else | ||
291 | #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0) | ||
292 | #endif | ||
277 | 293 | ||
278 | static void sysrq_handle_showmem(int key, struct tty_struct *tty) | 294 | static void sysrq_handle_showmem(int key, struct tty_struct *tty) |
279 | { | 295 | { |
@@ -406,7 +422,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { | |||
406 | NULL, /* x */ | 422 | NULL, /* x */ |
407 | /* y: May be registered on sparc64 for global register dump */ | 423 | /* y: May be registered on sparc64 for global register dump */ |
408 | NULL, /* y */ | 424 | NULL, /* y */ |
409 | NULL /* z */ | 425 | &sysrq_ftrace_dump_op, /* z */ |
410 | }; | 426 | }; |
411 | 427 | ||
412 | /* key2index calculation, -1 on invalid index */ | 428 | /* key2index calculation, -1 on invalid index */ |
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c index 5787249934c8..34ab6d798f81 100644 --- a/drivers/char/tty_audit.c +++ b/drivers/char/tty_audit.c | |||
@@ -67,6 +67,29 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf) | |||
67 | tty_audit_buf_free(buf); | 67 | tty_audit_buf_free(buf); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void tty_audit_log(const char *description, struct task_struct *tsk, | ||
71 | uid_t loginuid, unsigned sessionid, int major, | ||
72 | int minor, unsigned char *data, size_t size) | ||
73 | { | ||
74 | struct audit_buffer *ab; | ||
75 | |||
76 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); | ||
77 | if (ab) { | ||
78 | char name[sizeof(tsk->comm)]; | ||
79 | uid_t uid = task_uid(tsk); | ||
80 | |||
81 | audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " | ||
82 | "major=%d minor=%d comm=", description, | ||
83 | tsk->pid, uid, loginuid, sessionid, | ||
84 | major, minor); | ||
85 | get_task_comm(name, tsk); | ||
86 | audit_log_untrustedstring(ab, name); | ||
87 | audit_log_format(ab, " data="); | ||
88 | audit_log_n_hex(ab, data, size); | ||
89 | audit_log_end(ab); | ||
90 | } | ||
91 | } | ||
92 | |||
70 | /** | 93 | /** |
71 | * tty_audit_buf_push - Push buffered data out | 94 | * tty_audit_buf_push - Push buffered data out |
72 | * | 95 | * |
@@ -77,25 +100,12 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid, | |||
77 | unsigned int sessionid, | 100 | unsigned int sessionid, |
78 | struct tty_audit_buf *buf) | 101 | struct tty_audit_buf *buf) |
79 | { | 102 | { |
80 | struct audit_buffer *ab; | ||
81 | |||
82 | if (buf->valid == 0) | 103 | if (buf->valid == 0) |
83 | return; | 104 | return; |
84 | if (audit_enabled == 0) | 105 | if (audit_enabled == 0) |
85 | return; | 106 | return; |
86 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); | 107 | tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, |
87 | if (ab) { | 108 | buf->data, buf->valid); |
88 | char name[sizeof(tsk->comm)]; | ||
89 | |||
90 | audit_log_format(ab, "tty pid=%u uid=%u auid=%u ses=%u " | ||
91 | "major=%d minor=%d comm=", tsk->pid, tsk->uid, | ||
92 | loginuid, sessionid, buf->major, buf->minor); | ||
93 | get_task_comm(name, tsk); | ||
94 | audit_log_untrustedstring(ab, name); | ||
95 | audit_log_format(ab, " data="); | ||
96 | audit_log_n_hex(ab, buf->data, buf->valid); | ||
97 | audit_log_end(ab); | ||
98 | } | ||
99 | buf->valid = 0; | 109 | buf->valid = 0; |
100 | } | 110 | } |
101 | 111 | ||
@@ -150,6 +160,42 @@ void tty_audit_fork(struct signal_struct *sig) | |||
150 | } | 160 | } |
151 | 161 | ||
152 | /** | 162 | /** |
163 | * tty_audit_tiocsti - Log TIOCSTI | ||
164 | */ | ||
165 | void tty_audit_tiocsti(struct tty_struct *tty, char ch) | ||
166 | { | ||
167 | struct tty_audit_buf *buf; | ||
168 | int major, minor, should_audit; | ||
169 | |||
170 | spin_lock_irq(¤t->sighand->siglock); | ||
171 | should_audit = current->signal->audit_tty; | ||
172 | buf = current->signal->tty_audit_buf; | ||
173 | if (buf) | ||
174 | atomic_inc(&buf->count); | ||
175 | spin_unlock_irq(¤t->sighand->siglock); | ||
176 | |||
177 | major = tty->driver->major; | ||
178 | minor = tty->driver->minor_start + tty->index; | ||
179 | if (buf) { | ||
180 | mutex_lock(&buf->mutex); | ||
181 | if (buf->major == major && buf->minor == minor) | ||
182 | tty_audit_buf_push_current(buf); | ||
183 | mutex_unlock(&buf->mutex); | ||
184 | tty_audit_buf_put(buf); | ||
185 | } | ||
186 | |||
187 | if (should_audit && audit_enabled) { | ||
188 | uid_t auid; | ||
189 | unsigned int sessionid; | ||
190 | |||
191 | auid = audit_get_loginuid(current); | ||
192 | sessionid = audit_get_sessionid(current); | ||
193 | tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, | ||
194 | minor, &ch, 1); | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /** | ||
153 | * tty_audit_push_task - Flush task's pending audit data | 199 | * tty_audit_push_task - Flush task's pending audit data |
154 | */ | 200 | */ |
155 | void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) | 201 | void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 1412a8d1e58d..db15f9ba7c0b 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -2018,6 +2018,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p) | |||
2018 | return -EPERM; | 2018 | return -EPERM; |
2019 | if (get_user(ch, p)) | 2019 | if (get_user(ch, p)) |
2020 | return -EFAULT; | 2020 | return -EFAULT; |
2021 | tty_audit_tiocsti(tty, ch); | ||
2021 | ld = tty_ldisc_ref_wait(tty); | 2022 | ld = tty_ldisc_ref_wait(tty); |
2022 | ld->ops->receive_buf(tty, &ch, &mbz, 1); | 2023 | ld->ops->receive_buf(tty, &ch, &mbz, 1); |
2023 | tty_ldisc_deref(ld); | 2024 | tty_ldisc_deref(ld); |
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index aa7f7962a9a0..05d897764f02 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c | |||
@@ -21,9 +21,6 @@ | |||
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * Xilinx products are not intended for use in life support appliances, | ||
25 | * devices, or systems. Use in such applications is expressly prohibited. | ||
26 | * | ||
27 | * (c) Copyright 2003-2008 Xilinx Inc. | 24 | * (c) Copyright 2003-2008 Xilinx Inc. |
28 | * All rights reserved. | 25 | * All rights reserved. |
29 | * | 26 | * |
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h index 8b0252bf06e2..d4f419ee87ab 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.h +++ b/drivers/char/xilinx_hwicap/buffer_icap.h | |||
@@ -21,9 +21,6 @@ | |||
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * Xilinx products are not intended for use in life support appliances, | ||
25 | * devices, or systems. Use in such applications is expressly prohibited. | ||
26 | * | ||
27 | * (c) Copyright 2003-2008 Xilinx Inc. | 24 | * (c) Copyright 2003-2008 Xilinx Inc. |
28 | * All rights reserved. | 25 | * All rights reserved. |
29 | * | 26 | * |
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 776b50528478..02225eb19cf6 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c | |||
@@ -21,9 +21,6 @@ | |||
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * Xilinx products are not intended for use in life support appliances, | ||
25 | * devices, or systems. Use in such applications is expressly prohibited. | ||
26 | * | ||
27 | * (c) Copyright 2007-2008 Xilinx Inc. | 24 | * (c) Copyright 2007-2008 Xilinx Inc. |
28 | * All rights reserved. | 25 | * All rights reserved. |
29 | * | 26 | * |
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h index 62bda453c90b..4c9dd9a3b62a 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.h +++ b/drivers/char/xilinx_hwicap/fifo_icap.h | |||
@@ -21,9 +21,6 @@ | |||
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * Xilinx products are not intended for use in life support appliances, | ||
25 | * devices, or systems. Use in such applications is expressly prohibited. | ||
26 | * | ||
27 | * (c) Copyright 2007-2008 Xilinx Inc. | 24 | * (c) Copyright 2007-2008 Xilinx Inc. |
28 | * All rights reserved. | 25 | * All rights reserved. |
29 | * | 26 | * |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d16131949097..f40ab699860f 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -21,9 +21,6 @@ | |||
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * Xilinx products are not intended for use in life support appliances, | ||
25 | * devices, or systems. Use in such applications is expressly prohibited. | ||
26 | * | ||
27 | * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group | 24 | * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group |
28 | * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group | 25 | * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group |
29 | * (c) Copyright 2007-2008 Xilinx Inc. | 26 | * (c) Copyright 2007-2008 Xilinx Inc. |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index 24d0d9b938fb..8cca11981c5f 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h | |||
@@ -21,9 +21,6 @@ | |||
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * Xilinx products are not intended for use in life support appliances, | ||
25 | * devices, or systems. Use in such applications is expressly prohibited. | ||
26 | * | ||
27 | * (c) Copyright 2003-2007 Xilinx Inc. | 24 | * (c) Copyright 2003-2007 Xilinx Inc. |
28 | * All rights reserved. | 25 | * All rights reserved. |
29 | * | 26 | * |
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 5c9f67f98d10..c5afc98e2675 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c | |||
@@ -106,6 +106,7 @@ void proc_id_connector(struct task_struct *task, int which_id) | |||
106 | struct proc_event *ev; | 106 | struct proc_event *ev; |
107 | __u8 buffer[CN_PROC_MSG_SIZE]; | 107 | __u8 buffer[CN_PROC_MSG_SIZE]; |
108 | struct timespec ts; | 108 | struct timespec ts; |
109 | const struct cred *cred; | ||
109 | 110 | ||
110 | if (atomic_read(&proc_event_num_listeners) < 1) | 111 | if (atomic_read(&proc_event_num_listeners) < 1) |
111 | return; | 112 | return; |
@@ -115,14 +116,19 @@ void proc_id_connector(struct task_struct *task, int which_id) | |||
115 | ev->what = which_id; | 116 | ev->what = which_id; |
116 | ev->event_data.id.process_pid = task->pid; | 117 | ev->event_data.id.process_pid = task->pid; |
117 | ev->event_data.id.process_tgid = task->tgid; | 118 | ev->event_data.id.process_tgid = task->tgid; |
119 | rcu_read_lock(); | ||
120 | cred = __task_cred(task); | ||
118 | if (which_id == PROC_EVENT_UID) { | 121 | if (which_id == PROC_EVENT_UID) { |
119 | ev->event_data.id.r.ruid = task->uid; | 122 | ev->event_data.id.r.ruid = cred->uid; |
120 | ev->event_data.id.e.euid = task->euid; | 123 | ev->event_data.id.e.euid = cred->euid; |
121 | } else if (which_id == PROC_EVENT_GID) { | 124 | } else if (which_id == PROC_EVENT_GID) { |
122 | ev->event_data.id.r.rgid = task->gid; | 125 | ev->event_data.id.r.rgid = cred->gid; |
123 | ev->event_data.id.e.egid = task->egid; | 126 | ev->event_data.id.e.egid = cred->egid; |
124 | } else | 127 | } else { |
128 | rcu_read_unlock(); | ||
125 | return; | 129 | return; |
130 | } | ||
131 | rcu_read_unlock(); | ||
126 | get_seq(&msg->seq, &ev->cpu); | 132 | get_seq(&msg->seq, &ev->cpu); |
127 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ | 133 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
128 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); | 134 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 4d22b21bd3e3..0c79fe7f1567 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -38,9 +38,6 @@ | |||
38 | 38 | ||
39 | #include <asm/kmap_types.h> | 39 | #include <asm/kmap_types.h> |
40 | 40 | ||
41 | #undef dprintk | ||
42 | |||
43 | #define HIFN_TEST | ||
44 | //#define HIFN_DEBUG | 41 | //#define HIFN_DEBUG |
45 | 42 | ||
46 | #ifdef HIFN_DEBUG | 43 | #ifdef HIFN_DEBUG |
@@ -363,14 +360,14 @@ static atomic_t hifn_dev_number; | |||
363 | #define HIFN_NAMESIZE 32 | 360 | #define HIFN_NAMESIZE 32 |
364 | #define HIFN_MAX_RESULT_ORDER 5 | 361 | #define HIFN_MAX_RESULT_ORDER 5 |
365 | 362 | ||
366 | #define HIFN_D_CMD_RSIZE 24*4 | 363 | #define HIFN_D_CMD_RSIZE 24*1 |
367 | #define HIFN_D_SRC_RSIZE 80*4 | 364 | #define HIFN_D_SRC_RSIZE 80*1 |
368 | #define HIFN_D_DST_RSIZE 80*4 | 365 | #define HIFN_D_DST_RSIZE 80*1 |
369 | #define HIFN_D_RES_RSIZE 24*4 | 366 | #define HIFN_D_RES_RSIZE 24*1 |
370 | 367 | ||
371 | #define HIFN_D_DST_DALIGN 4 | 368 | #define HIFN_D_DST_DALIGN 4 |
372 | 369 | ||
373 | #define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1 | 370 | #define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1) |
374 | 371 | ||
375 | #define AES_MIN_KEY_SIZE 16 | 372 | #define AES_MIN_KEY_SIZE 16 |
376 | #define AES_MAX_KEY_SIZE 32 | 373 | #define AES_MAX_KEY_SIZE 32 |
@@ -406,8 +403,6 @@ struct hifn_dma { | |||
406 | u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; | 403 | u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; |
407 | u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; | 404 | u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; |
408 | 405 | ||
409 | u64 test_src, test_dst; | ||
410 | |||
411 | /* | 406 | /* |
412 | * Our current positions for insertion and removal from the descriptor | 407 | * Our current positions for insertion and removal from the descriptor |
413 | * rings. | 408 | * rings. |
@@ -434,9 +429,6 @@ struct hifn_device | |||
434 | struct pci_dev *pdev; | 429 | struct pci_dev *pdev; |
435 | void __iomem *bar[3]; | 430 | void __iomem *bar[3]; |
436 | 431 | ||
437 | unsigned long result_mem; | ||
438 | dma_addr_t dst; | ||
439 | |||
440 | void *desc_virt; | 432 | void *desc_virt; |
441 | dma_addr_t desc_dma; | 433 | dma_addr_t desc_dma; |
442 | 434 | ||
@@ -446,8 +438,6 @@ struct hifn_device | |||
446 | 438 | ||
447 | spinlock_t lock; | 439 | spinlock_t lock; |
448 | 440 | ||
449 | void *priv; | ||
450 | |||
451 | u32 flags; | 441 | u32 flags; |
452 | int active, started; | 442 | int active, started; |
453 | struct delayed_work work; | 443 | struct delayed_work work; |
@@ -657,12 +647,17 @@ struct ablkcipher_walk | |||
657 | 647 | ||
658 | struct hifn_context | 648 | struct hifn_context |
659 | { | 649 | { |
660 | u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv; | 650 | u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; |
661 | struct hifn_device *dev; | 651 | struct hifn_device *dev; |
662 | unsigned int keysize, ivsize; | 652 | unsigned int keysize; |
653 | }; | ||
654 | |||
655 | struct hifn_request_context | ||
656 | { | ||
657 | u8 *iv; | ||
658 | unsigned int ivsize; | ||
663 | u8 op, type, mode, unused; | 659 | u8 op, type, mode, unused; |
664 | struct ablkcipher_walk walk; | 660 | struct ablkcipher_walk walk; |
665 | atomic_t sg_num; | ||
666 | }; | 661 | }; |
667 | 662 | ||
668 | #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) | 663 | #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) |
@@ -1168,7 +1163,8 @@ static int hifn_setup_crypto_command(struct hifn_device *dev, | |||
1168 | } | 1163 | } |
1169 | 1164 | ||
1170 | static int hifn_setup_cmd_desc(struct hifn_device *dev, | 1165 | static int hifn_setup_cmd_desc(struct hifn_device *dev, |
1171 | struct hifn_context *ctx, void *priv, unsigned int nbytes) | 1166 | struct hifn_context *ctx, struct hifn_request_context *rctx, |
1167 | void *priv, unsigned int nbytes) | ||
1172 | { | 1168 | { |
1173 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1169 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
1174 | int cmd_len, sa_idx; | 1170 | int cmd_len, sa_idx; |
@@ -1179,7 +1175,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1179 | buf_pos = buf = dma->command_bufs[dma->cmdi]; | 1175 | buf_pos = buf = dma->command_bufs[dma->cmdi]; |
1180 | 1176 | ||
1181 | mask = 0; | 1177 | mask = 0; |
1182 | switch (ctx->op) { | 1178 | switch (rctx->op) { |
1183 | case ACRYPTO_OP_DECRYPT: | 1179 | case ACRYPTO_OP_DECRYPT: |
1184 | mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; | 1180 | mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; |
1185 | break; | 1181 | break; |
@@ -1196,15 +1192,15 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1196 | buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, | 1192 | buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, |
1197 | nbytes, mask, dev->snum); | 1193 | nbytes, mask, dev->snum); |
1198 | 1194 | ||
1199 | if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) { | 1195 | if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) { |
1200 | u16 md = 0; | 1196 | u16 md = 0; |
1201 | 1197 | ||
1202 | if (ctx->keysize) | 1198 | if (ctx->keysize) |
1203 | md |= HIFN_CRYPT_CMD_NEW_KEY; | 1199 | md |= HIFN_CRYPT_CMD_NEW_KEY; |
1204 | if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB) | 1200 | if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB) |
1205 | md |= HIFN_CRYPT_CMD_NEW_IV; | 1201 | md |= HIFN_CRYPT_CMD_NEW_IV; |
1206 | 1202 | ||
1207 | switch (ctx->mode) { | 1203 | switch (rctx->mode) { |
1208 | case ACRYPTO_MODE_ECB: | 1204 | case ACRYPTO_MODE_ECB: |
1209 | md |= HIFN_CRYPT_CMD_MODE_ECB; | 1205 | md |= HIFN_CRYPT_CMD_MODE_ECB; |
1210 | break; | 1206 | break; |
@@ -1221,7 +1217,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1221 | goto err_out; | 1217 | goto err_out; |
1222 | } | 1218 | } |
1223 | 1219 | ||
1224 | switch (ctx->type) { | 1220 | switch (rctx->type) { |
1225 | case ACRYPTO_TYPE_AES_128: | 1221 | case ACRYPTO_TYPE_AES_128: |
1226 | if (ctx->keysize != 16) | 1222 | if (ctx->keysize != 16) |
1227 | goto err_out; | 1223 | goto err_out; |
@@ -1256,17 +1252,18 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1256 | 1252 | ||
1257 | buf_pos += hifn_setup_crypto_command(dev, buf_pos, | 1253 | buf_pos += hifn_setup_crypto_command(dev, buf_pos, |
1258 | nbytes, nbytes, ctx->key, ctx->keysize, | 1254 | nbytes, nbytes, ctx->key, ctx->keysize, |
1259 | ctx->iv, ctx->ivsize, md); | 1255 | rctx->iv, rctx->ivsize, md); |
1260 | } | 1256 | } |
1261 | 1257 | ||
1262 | dev->sa[sa_idx] = priv; | 1258 | dev->sa[sa_idx] = priv; |
1259 | dev->started++; | ||
1263 | 1260 | ||
1264 | cmd_len = buf_pos - buf; | 1261 | cmd_len = buf_pos - buf; |
1265 | dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | | 1262 | dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | |
1266 | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); | 1263 | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); |
1267 | 1264 | ||
1268 | if (++dma->cmdi == HIFN_D_CMD_RSIZE) { | 1265 | if (++dma->cmdi == HIFN_D_CMD_RSIZE) { |
1269 | dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND | | 1266 | dma->cmdr[dma->cmdi].l = __cpu_to_le32( |
1270 | HIFN_D_VALID | HIFN_D_LAST | | 1267 | HIFN_D_VALID | HIFN_D_LAST | |
1271 | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); | 1268 | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); |
1272 | dma->cmdi = 0; | 1269 | dma->cmdi = 0; |
@@ -1284,7 +1281,7 @@ err_out: | |||
1284 | } | 1281 | } |
1285 | 1282 | ||
1286 | static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, | 1283 | static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, |
1287 | unsigned int offset, unsigned int size) | 1284 | unsigned int offset, unsigned int size, int last) |
1288 | { | 1285 | { |
1289 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1286 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
1290 | int idx; | 1287 | int idx; |
@@ -1296,12 +1293,12 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, | |||
1296 | 1293 | ||
1297 | dma->srcr[idx].p = __cpu_to_le32(addr); | 1294 | dma->srcr[idx].p = __cpu_to_le32(addr); |
1298 | dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | | 1295 | dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | |
1299 | HIFN_D_MASKDONEIRQ | HIFN_D_LAST); | 1296 | HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); |
1300 | 1297 | ||
1301 | if (++idx == HIFN_D_SRC_RSIZE) { | 1298 | if (++idx == HIFN_D_SRC_RSIZE) { |
1302 | dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | | 1299 | dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | |
1303 | HIFN_D_JUMP | | 1300 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | |
1304 | HIFN_D_MASKDONEIRQ | HIFN_D_LAST); | 1301 | (last ? HIFN_D_LAST : 0)); |
1305 | idx = 0; | 1302 | idx = 0; |
1306 | } | 1303 | } |
1307 | 1304 | ||
@@ -1342,7 +1339,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev) | |||
1342 | } | 1339 | } |
1343 | 1340 | ||
1344 | static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, | 1341 | static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, |
1345 | unsigned offset, unsigned size) | 1342 | unsigned offset, unsigned size, int last) |
1346 | { | 1343 | { |
1347 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1344 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
1348 | int idx; | 1345 | int idx; |
@@ -1353,12 +1350,12 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, | |||
1353 | idx = dma->dsti; | 1350 | idx = dma->dsti; |
1354 | dma->dstr[idx].p = __cpu_to_le32(addr); | 1351 | dma->dstr[idx].p = __cpu_to_le32(addr); |
1355 | dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | | 1352 | dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | |
1356 | HIFN_D_MASKDONEIRQ | HIFN_D_LAST); | 1353 | HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); |
1357 | 1354 | ||
1358 | if (++idx == HIFN_D_DST_RSIZE) { | 1355 | if (++idx == HIFN_D_DST_RSIZE) { |
1359 | dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | | 1356 | dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | |
1360 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | | 1357 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | |
1361 | HIFN_D_LAST); | 1358 | (last ? HIFN_D_LAST : 0)); |
1362 | idx = 0; | 1359 | idx = 0; |
1363 | } | 1360 | } |
1364 | dma->dsti = idx; | 1361 | dma->dsti = idx; |
@@ -1370,16 +1367,52 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, | |||
1370 | } | 1367 | } |
1371 | } | 1368 | } |
1372 | 1369 | ||
1373 | static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff, | 1370 | static int hifn_setup_dma(struct hifn_device *dev, |
1374 | struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv, | 1371 | struct hifn_context *ctx, struct hifn_request_context *rctx, |
1375 | struct hifn_context *ctx) | 1372 | struct scatterlist *src, struct scatterlist *dst, |
1373 | unsigned int nbytes, void *priv) | ||
1376 | { | 1374 | { |
1377 | dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n", | 1375 | struct scatterlist *t; |
1378 | dev->name, spage, soff, dpage, doff, nbytes, priv, ctx); | 1376 | struct page *spage, *dpage; |
1377 | unsigned int soff, doff; | ||
1378 | unsigned int n, len; | ||
1379 | 1379 | ||
1380 | hifn_setup_src_desc(dev, spage, soff, nbytes); | 1380 | n = nbytes; |
1381 | hifn_setup_cmd_desc(dev, ctx, priv, nbytes); | 1381 | while (n) { |
1382 | hifn_setup_dst_desc(dev, dpage, doff, nbytes); | 1382 | spage = sg_page(src); |
1383 | soff = src->offset; | ||
1384 | len = min(src->length, n); | ||
1385 | |||
1386 | hifn_setup_src_desc(dev, spage, soff, len, n - len == 0); | ||
1387 | |||
1388 | src++; | ||
1389 | n -= len; | ||
1390 | } | ||
1391 | |||
1392 | t = &rctx->walk.cache[0]; | ||
1393 | n = nbytes; | ||
1394 | while (n) { | ||
1395 | if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { | ||
1396 | BUG_ON(!sg_page(t)); | ||
1397 | dpage = sg_page(t); | ||
1398 | doff = 0; | ||
1399 | len = t->length; | ||
1400 | } else { | ||
1401 | BUG_ON(!sg_page(dst)); | ||
1402 | dpage = sg_page(dst); | ||
1403 | doff = dst->offset; | ||
1404 | len = dst->length; | ||
1405 | } | ||
1406 | len = min(len, n); | ||
1407 | |||
1408 | hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0); | ||
1409 | |||
1410 | dst++; | ||
1411 | t++; | ||
1412 | n -= len; | ||
1413 | } | ||
1414 | |||
1415 | hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes); | ||
1383 | hifn_setup_res_desc(dev); | 1416 | hifn_setup_res_desc(dev); |
1384 | return 0; | 1417 | return 0; |
1385 | } | 1418 | } |
@@ -1424,32 +1457,26 @@ static void ablkcipher_walk_exit(struct ablkcipher_walk *w) | |||
1424 | w->num = 0; | 1457 | w->num = 0; |
1425 | } | 1458 | } |
1426 | 1459 | ||
1427 | static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src, | 1460 | static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, |
1428 | unsigned int size, unsigned int *nbytesp) | 1461 | unsigned int size, unsigned int *nbytesp) |
1429 | { | 1462 | { |
1430 | unsigned int copy, drest = *drestp, nbytes = *nbytesp; | 1463 | unsigned int copy, drest = *drestp, nbytes = *nbytesp; |
1431 | int idx = 0; | 1464 | int idx = 0; |
1432 | void *saddr; | ||
1433 | 1465 | ||
1434 | if (drest < size || size > nbytes) | 1466 | if (drest < size || size > nbytes) |
1435 | return -EINVAL; | 1467 | return -EINVAL; |
1436 | 1468 | ||
1437 | while (size) { | 1469 | while (size) { |
1438 | copy = min(drest, min(size, src->length)); | 1470 | copy = min(drest, min(size, dst->length)); |
1439 | |||
1440 | saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1); | ||
1441 | memcpy(daddr, saddr + src->offset, copy); | ||
1442 | kunmap_atomic(saddr, KM_SOFTIRQ1); | ||
1443 | 1471 | ||
1444 | size -= copy; | 1472 | size -= copy; |
1445 | drest -= copy; | 1473 | drest -= copy; |
1446 | nbytes -= copy; | 1474 | nbytes -= copy; |
1447 | daddr += copy; | ||
1448 | 1475 | ||
1449 | dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", | 1476 | dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", |
1450 | __func__, copy, size, drest, nbytes); | 1477 | __func__, copy, size, drest, nbytes); |
1451 | 1478 | ||
1452 | src++; | 1479 | dst++; |
1453 | idx++; | 1480 | idx++; |
1454 | } | 1481 | } |
1455 | 1482 | ||
@@ -1462,8 +1489,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist | |||
1462 | static int ablkcipher_walk(struct ablkcipher_request *req, | 1489 | static int ablkcipher_walk(struct ablkcipher_request *req, |
1463 | struct ablkcipher_walk *w) | 1490 | struct ablkcipher_walk *w) |
1464 | { | 1491 | { |
1465 | struct scatterlist *src, *dst, *t; | 1492 | struct scatterlist *dst, *t; |
1466 | void *daddr; | ||
1467 | unsigned int nbytes = req->nbytes, offset, copy, diff; | 1493 | unsigned int nbytes = req->nbytes, offset, copy, diff; |
1468 | int idx, tidx, err; | 1494 | int idx, tidx, err; |
1469 | 1495 | ||
@@ -1473,26 +1499,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req, | |||
1473 | if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED)) | 1499 | if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED)) |
1474 | return -EINVAL; | 1500 | return -EINVAL; |
1475 | 1501 | ||
1476 | src = &req->src[idx]; | ||
1477 | dst = &req->dst[idx]; | 1502 | dst = &req->dst[idx]; |
1478 | 1503 | ||
1479 | dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, " | 1504 | dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", |
1480 | "nbytes: %u.\n", | 1505 | __func__, dst->length, dst->offset, offset, nbytes); |
1481 | __func__, src->length, dst->length, src->offset, | ||
1482 | dst->offset, offset, nbytes); | ||
1483 | 1506 | ||
1484 | if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || | 1507 | if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || |
1485 | !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || | 1508 | !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || |
1486 | offset) { | 1509 | offset) { |
1487 | unsigned slen = min(src->length - offset, nbytes); | 1510 | unsigned slen = min(dst->length - offset, nbytes); |
1488 | unsigned dlen = PAGE_SIZE; | 1511 | unsigned dlen = PAGE_SIZE; |
1489 | 1512 | ||
1490 | t = &w->cache[idx]; | 1513 | t = &w->cache[idx]; |
1491 | 1514 | ||
1492 | daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0); | 1515 | err = ablkcipher_add(&dlen, dst, slen, &nbytes); |
1493 | err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes); | ||
1494 | if (err < 0) | 1516 | if (err < 0) |
1495 | goto err_out_unmap; | 1517 | return err; |
1496 | 1518 | ||
1497 | idx += err; | 1519 | idx += err; |
1498 | 1520 | ||
@@ -1528,21 +1550,19 @@ static int ablkcipher_walk(struct ablkcipher_request *req, | |||
1528 | } else { | 1550 | } else { |
1529 | copy += diff + nbytes; | 1551 | copy += diff + nbytes; |
1530 | 1552 | ||
1531 | src = &req->src[idx]; | 1553 | dst = &req->dst[idx]; |
1532 | 1554 | ||
1533 | err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes); | 1555 | err = ablkcipher_add(&dlen, dst, nbytes, &nbytes); |
1534 | if (err < 0) | 1556 | if (err < 0) |
1535 | goto err_out_unmap; | 1557 | return err; |
1536 | 1558 | ||
1537 | idx += err; | 1559 | idx += err; |
1538 | } | 1560 | } |
1539 | 1561 | ||
1540 | t->length = copy; | 1562 | t->length = copy; |
1541 | t->offset = offset; | 1563 | t->offset = offset; |
1542 | |||
1543 | kunmap_atomic(daddr, KM_SOFTIRQ0); | ||
1544 | } else { | 1564 | } else { |
1545 | nbytes -= min(src->length, nbytes); | 1565 | nbytes -= min(dst->length, nbytes); |
1546 | idx++; | 1566 | idx++; |
1547 | } | 1567 | } |
1548 | 1568 | ||
@@ -1550,26 +1570,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req, | |||
1550 | } | 1570 | } |
1551 | 1571 | ||
1552 | return tidx; | 1572 | return tidx; |
1553 | |||
1554 | err_out_unmap: | ||
1555 | kunmap_atomic(daddr, KM_SOFTIRQ0); | ||
1556 | return err; | ||
1557 | } | 1573 | } |
1558 | 1574 | ||
1559 | static int hifn_setup_session(struct ablkcipher_request *req) | 1575 | static int hifn_setup_session(struct ablkcipher_request *req) |
1560 | { | 1576 | { |
1561 | struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); | 1577 | struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); |
1578 | struct hifn_request_context *rctx = ablkcipher_request_ctx(req); | ||
1562 | struct hifn_device *dev = ctx->dev; | 1579 | struct hifn_device *dev = ctx->dev; |
1563 | struct page *spage, *dpage; | 1580 | unsigned long dlen, flags; |
1564 | unsigned long soff, doff, dlen, flags; | 1581 | unsigned int nbytes = req->nbytes, idx = 0; |
1565 | unsigned int nbytes = req->nbytes, idx = 0, len; | ||
1566 | int err = -EINVAL, sg_num; | 1582 | int err = -EINVAL, sg_num; |
1567 | struct scatterlist *src, *dst, *t; | 1583 | struct scatterlist *dst; |
1568 | 1584 | ||
1569 | if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB) | 1585 | if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB) |
1570 | goto err_out_exit; | 1586 | goto err_out_exit; |
1571 | 1587 | ||
1572 | ctx->walk.flags = 0; | 1588 | rctx->walk.flags = 0; |
1573 | 1589 | ||
1574 | while (nbytes) { | 1590 | while (nbytes) { |
1575 | dst = &req->dst[idx]; | 1591 | dst = &req->dst[idx]; |
@@ -1577,27 +1593,23 @@ static int hifn_setup_session(struct ablkcipher_request *req) | |||
1577 | 1593 | ||
1578 | if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || | 1594 | if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || |
1579 | !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN)) | 1595 | !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN)) |
1580 | ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; | 1596 | rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; |
1581 | 1597 | ||
1582 | nbytes -= dlen; | 1598 | nbytes -= dlen; |
1583 | idx++; | 1599 | idx++; |
1584 | } | 1600 | } |
1585 | 1601 | ||
1586 | if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { | 1602 | if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { |
1587 | err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC); | 1603 | err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); |
1588 | if (err < 0) | 1604 | if (err < 0) |
1589 | return err; | 1605 | return err; |
1590 | } | 1606 | } |
1591 | 1607 | ||
1592 | nbytes = req->nbytes; | 1608 | sg_num = ablkcipher_walk(req, &rctx->walk); |
1593 | idx = 0; | ||
1594 | |||
1595 | sg_num = ablkcipher_walk(req, &ctx->walk); | ||
1596 | if (sg_num < 0) { | 1609 | if (sg_num < 0) { |
1597 | err = sg_num; | 1610 | err = sg_num; |
1598 | goto err_out_exit; | 1611 | goto err_out_exit; |
1599 | } | 1612 | } |
1600 | atomic_set(&ctx->sg_num, sg_num); | ||
1601 | 1613 | ||
1602 | spin_lock_irqsave(&dev->lock, flags); | 1614 | spin_lock_irqsave(&dev->lock, flags); |
1603 | if (dev->started + sg_num > HIFN_QUEUE_LENGTH) { | 1615 | if (dev->started + sg_num > HIFN_QUEUE_LENGTH) { |
@@ -1605,37 +1617,11 @@ static int hifn_setup_session(struct ablkcipher_request *req) | |||
1605 | goto err_out; | 1617 | goto err_out; |
1606 | } | 1618 | } |
1607 | 1619 | ||
1608 | dev->snum++; | 1620 | err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req); |
1609 | dev->started += sg_num; | 1621 | if (err) |
1610 | 1622 | goto err_out; | |
1611 | while (nbytes) { | ||
1612 | src = &req->src[idx]; | ||
1613 | dst = &req->dst[idx]; | ||
1614 | t = &ctx->walk.cache[idx]; | ||
1615 | |||
1616 | if (t->length) { | ||
1617 | spage = dpage = sg_page(t); | ||
1618 | soff = doff = 0; | ||
1619 | len = t->length; | ||
1620 | } else { | ||
1621 | spage = sg_page(src); | ||
1622 | soff = src->offset; | ||
1623 | |||
1624 | dpage = sg_page(dst); | ||
1625 | doff = dst->offset; | ||
1626 | |||
1627 | len = dst->length; | ||
1628 | } | ||
1629 | |||
1630 | idx++; | ||
1631 | |||
1632 | err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes, | ||
1633 | req, ctx); | ||
1634 | if (err) | ||
1635 | goto err_out; | ||
1636 | 1623 | ||
1637 | nbytes -= min(len, nbytes); | 1624 | dev->snum++; |
1638 | } | ||
1639 | 1625 | ||
1640 | dev->active = HIFN_DEFAULT_ACTIVE_NUM; | 1626 | dev->active = HIFN_DEFAULT_ACTIVE_NUM; |
1641 | spin_unlock_irqrestore(&dev->lock, flags); | 1627 | spin_unlock_irqrestore(&dev->lock, flags); |
@@ -1645,12 +1631,13 @@ static int hifn_setup_session(struct ablkcipher_request *req) | |||
1645 | err_out: | 1631 | err_out: |
1646 | spin_unlock_irqrestore(&dev->lock, flags); | 1632 | spin_unlock_irqrestore(&dev->lock, flags); |
1647 | err_out_exit: | 1633 | err_out_exit: |
1648 | if (err) | 1634 | if (err) { |
1649 | dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " | 1635 | printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " |
1650 | "type: %u, err: %d.\n", | 1636 | "type: %u, err: %d.\n", |
1651 | dev->name, ctx->iv, ctx->ivsize, | 1637 | dev->name, rctx->iv, rctx->ivsize, |
1652 | ctx->key, ctx->keysize, | 1638 | ctx->key, ctx->keysize, |
1653 | ctx->mode, ctx->op, ctx->type, err); | 1639 | rctx->mode, rctx->op, rctx->type, err); |
1640 | } | ||
1654 | 1641 | ||
1655 | return err; | 1642 | return err; |
1656 | } | 1643 | } |
@@ -1660,31 +1647,33 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) | |||
1660 | int n, err; | 1647 | int n, err; |
1661 | u8 src[16]; | 1648 | u8 src[16]; |
1662 | struct hifn_context ctx; | 1649 | struct hifn_context ctx; |
1650 | struct hifn_request_context rctx; | ||
1663 | u8 fips_aes_ecb_from_zero[16] = { | 1651 | u8 fips_aes_ecb_from_zero[16] = { |
1664 | 0x66, 0xE9, 0x4B, 0xD4, | 1652 | 0x66, 0xE9, 0x4B, 0xD4, |
1665 | 0xEF, 0x8A, 0x2C, 0x3B, | 1653 | 0xEF, 0x8A, 0x2C, 0x3B, |
1666 | 0x88, 0x4C, 0xFA, 0x59, | 1654 | 0x88, 0x4C, 0xFA, 0x59, |
1667 | 0xCA, 0x34, 0x2B, 0x2E}; | 1655 | 0xCA, 0x34, 0x2B, 0x2E}; |
1656 | struct scatterlist sg; | ||
1668 | 1657 | ||
1669 | memset(src, 0, sizeof(src)); | 1658 | memset(src, 0, sizeof(src)); |
1670 | memset(ctx.key, 0, sizeof(ctx.key)); | 1659 | memset(ctx.key, 0, sizeof(ctx.key)); |
1671 | 1660 | ||
1672 | ctx.dev = dev; | 1661 | ctx.dev = dev; |
1673 | ctx.keysize = 16; | 1662 | ctx.keysize = 16; |
1674 | ctx.ivsize = 0; | 1663 | rctx.ivsize = 0; |
1675 | ctx.iv = NULL; | 1664 | rctx.iv = NULL; |
1676 | ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; | 1665 | rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; |
1677 | ctx.mode = ACRYPTO_MODE_ECB; | 1666 | rctx.mode = ACRYPTO_MODE_ECB; |
1678 | ctx.type = ACRYPTO_TYPE_AES_128; | 1667 | rctx.type = ACRYPTO_TYPE_AES_128; |
1679 | atomic_set(&ctx.sg_num, 1); | 1668 | rctx.walk.cache[0].length = 0; |
1680 | 1669 | ||
1681 | err = hifn_setup_dma(dev, | 1670 | sg_init_one(&sg, &src, sizeof(src)); |
1682 | virt_to_page(src), offset_in_page(src), | 1671 | |
1683 | virt_to_page(src), offset_in_page(src), | 1672 | err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL); |
1684 | sizeof(src), NULL, &ctx); | ||
1685 | if (err) | 1673 | if (err) |
1686 | goto err_out; | 1674 | goto err_out; |
1687 | 1675 | ||
1676 | dev->started = 0; | ||
1688 | msleep(200); | 1677 | msleep(200); |
1689 | 1678 | ||
1690 | dprintk("%s: decoded: ", dev->name); | 1679 | dprintk("%s: decoded: ", dev->name); |
@@ -1711,6 +1700,7 @@ static int hifn_start_device(struct hifn_device *dev) | |||
1711 | { | 1700 | { |
1712 | int err; | 1701 | int err; |
1713 | 1702 | ||
1703 | dev->started = dev->active = 0; | ||
1714 | hifn_reset_dma(dev, 1); | 1704 | hifn_reset_dma(dev, 1); |
1715 | 1705 | ||
1716 | err = hifn_enable_crypto(dev); | 1706 | err = hifn_enable_crypto(dev); |
@@ -1764,90 +1754,65 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset | |||
1764 | return idx; | 1754 | return idx; |
1765 | } | 1755 | } |
1766 | 1756 | ||
1767 | static void hifn_process_ready(struct ablkcipher_request *req, int error) | 1757 | static inline void hifn_complete_sa(struct hifn_device *dev, int i) |
1768 | { | 1758 | { |
1769 | struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); | 1759 | unsigned long flags; |
1770 | struct hifn_device *dev; | ||
1771 | |||
1772 | dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx); | ||
1773 | 1760 | ||
1774 | dev = ctx->dev; | 1761 | spin_lock_irqsave(&dev->lock, flags); |
1775 | dprintk("%s: req: %p, started: %d, sg_num: %d.\n", | 1762 | dev->sa[i] = NULL; |
1776 | __func__, req, dev->started, atomic_read(&ctx->sg_num)); | 1763 | dev->started--; |
1764 | if (dev->started < 0) | ||
1765 | printk("%s: started: %d.\n", __func__, dev->started); | ||
1766 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1767 | BUG_ON(dev->started < 0); | ||
1768 | } | ||
1777 | 1769 | ||
1778 | if (--dev->started < 0) | 1770 | static void hifn_process_ready(struct ablkcipher_request *req, int error) |
1779 | BUG(); | 1771 | { |
1772 | struct hifn_request_context *rctx = ablkcipher_request_ctx(req); | ||
1780 | 1773 | ||
1781 | if (atomic_dec_and_test(&ctx->sg_num)) { | 1774 | if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { |
1782 | unsigned int nbytes = req->nbytes; | 1775 | unsigned int nbytes = req->nbytes; |
1783 | int idx = 0, err; | 1776 | int idx = 0, err; |
1784 | struct scatterlist *dst, *t; | 1777 | struct scatterlist *dst, *t; |
1785 | void *saddr; | 1778 | void *saddr; |
1786 | 1779 | ||
1787 | if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { | 1780 | while (nbytes) { |
1788 | while (nbytes) { | 1781 | t = &rctx->walk.cache[idx]; |
1789 | t = &ctx->walk.cache[idx]; | 1782 | dst = &req->dst[idx]; |
1790 | dst = &req->dst[idx]; | ||
1791 | |||
1792 | dprintk("\n%s: sg_page(t): %p, t->length: %u, " | ||
1793 | "sg_page(dst): %p, dst->length: %u, " | ||
1794 | "nbytes: %u.\n", | ||
1795 | __func__, sg_page(t), t->length, | ||
1796 | sg_page(dst), dst->length, nbytes); | ||
1797 | 1783 | ||
1798 | if (!t->length) { | 1784 | dprintk("\n%s: sg_page(t): %p, t->length: %u, " |
1799 | nbytes -= min(dst->length, nbytes); | 1785 | "sg_page(dst): %p, dst->length: %u, " |
1800 | idx++; | 1786 | "nbytes: %u.\n", |
1801 | continue; | 1787 | __func__, sg_page(t), t->length, |
1802 | } | 1788 | sg_page(dst), dst->length, nbytes); |
1803 | 1789 | ||
1804 | saddr = kmap_atomic(sg_page(t), KM_IRQ1); | 1790 | if (!t->length) { |
1791 | nbytes -= min(dst->length, nbytes); | ||
1792 | idx++; | ||
1793 | continue; | ||
1794 | } | ||
1805 | 1795 | ||
1806 | err = ablkcipher_get(saddr, &t->length, t->offset, | 1796 | saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0); |
1807 | dst, nbytes, &nbytes); | ||
1808 | if (err < 0) { | ||
1809 | kunmap_atomic(saddr, KM_IRQ1); | ||
1810 | break; | ||
1811 | } | ||
1812 | 1797 | ||
1813 | idx += err; | 1798 | err = ablkcipher_get(saddr, &t->length, t->offset, |
1814 | kunmap_atomic(saddr, KM_IRQ1); | 1799 | dst, nbytes, &nbytes); |
1800 | if (err < 0) { | ||
1801 | kunmap_atomic(saddr, KM_SOFTIRQ0); | ||
1802 | break; | ||
1815 | } | 1803 | } |
1816 | 1804 | ||
1817 | ablkcipher_walk_exit(&ctx->walk); | 1805 | idx += err; |
1806 | kunmap_atomic(saddr, KM_SOFTIRQ0); | ||
1818 | } | 1807 | } |
1819 | 1808 | ||
1820 | req->base.complete(&req->base, error); | 1809 | ablkcipher_walk_exit(&rctx->walk); |
1821 | } | 1810 | } |
1822 | } | ||
1823 | 1811 | ||
1824 | static void hifn_check_for_completion(struct hifn_device *dev, int error) | 1812 | req->base.complete(&req->base, error); |
1825 | { | ||
1826 | int i; | ||
1827 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | ||
1828 | |||
1829 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) { | ||
1830 | struct hifn_desc *d = &dma->resr[i]; | ||
1831 | |||
1832 | if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) { | ||
1833 | dev->success++; | ||
1834 | dev->reset = 0; | ||
1835 | hifn_process_ready(dev->sa[i], error); | ||
1836 | dev->sa[i] = NULL; | ||
1837 | } | ||
1838 | |||
1839 | if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER)) | ||
1840 | if (printk_ratelimit()) | ||
1841 | printk("%s: overflow detected [d: %u, o: %u] " | ||
1842 | "at %d resr: l: %08x, p: %08x.\n", | ||
1843 | dev->name, | ||
1844 | !!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)), | ||
1845 | !!(d->l & __cpu_to_le32(HIFN_D_OVER)), | ||
1846 | i, d->l, d->p); | ||
1847 | } | ||
1848 | } | 1813 | } |
1849 | 1814 | ||
1850 | static void hifn_clear_rings(struct hifn_device *dev) | 1815 | static void hifn_clear_rings(struct hifn_device *dev, int error) |
1851 | { | 1816 | { |
1852 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1817 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
1853 | int i, u; | 1818 | int i, u; |
@@ -1864,21 +1829,26 @@ static void hifn_clear_rings(struct hifn_device *dev) | |||
1864 | if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID)) | 1829 | if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID)) |
1865 | break; | 1830 | break; |
1866 | 1831 | ||
1867 | if (i != HIFN_D_RES_RSIZE) | 1832 | if (dev->sa[i]) { |
1868 | u--; | 1833 | dev->success++; |
1834 | dev->reset = 0; | ||
1835 | hifn_process_ready(dev->sa[i], error); | ||
1836 | hifn_complete_sa(dev, i); | ||
1837 | } | ||
1869 | 1838 | ||
1870 | if (++i == (HIFN_D_RES_RSIZE + 1)) | 1839 | if (++i == HIFN_D_RES_RSIZE) |
1871 | i = 0; | 1840 | i = 0; |
1841 | u--; | ||
1872 | } | 1842 | } |
1873 | dma->resk = i; dma->resu = u; | 1843 | dma->resk = i; dma->resu = u; |
1874 | 1844 | ||
1875 | i = dma->srck; u = dma->srcu; | 1845 | i = dma->srck; u = dma->srcu; |
1876 | while (u != 0) { | 1846 | while (u != 0) { |
1877 | if (i == HIFN_D_SRC_RSIZE) | ||
1878 | i = 0; | ||
1879 | if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID)) | 1847 | if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID)) |
1880 | break; | 1848 | break; |
1881 | i++, u--; | 1849 | if (++i == HIFN_D_SRC_RSIZE) |
1850 | i = 0; | ||
1851 | u--; | ||
1882 | } | 1852 | } |
1883 | dma->srck = i; dma->srcu = u; | 1853 | dma->srck = i; dma->srcu = u; |
1884 | 1854 | ||
@@ -1886,20 +1856,19 @@ static void hifn_clear_rings(struct hifn_device *dev) | |||
1886 | while (u != 0) { | 1856 | while (u != 0) { |
1887 | if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID)) | 1857 | if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID)) |
1888 | break; | 1858 | break; |
1889 | if (i != HIFN_D_CMD_RSIZE) | 1859 | if (++i == HIFN_D_CMD_RSIZE) |
1890 | u--; | ||
1891 | if (++i == (HIFN_D_CMD_RSIZE + 1)) | ||
1892 | i = 0; | 1860 | i = 0; |
1861 | u--; | ||
1893 | } | 1862 | } |
1894 | dma->cmdk = i; dma->cmdu = u; | 1863 | dma->cmdk = i; dma->cmdu = u; |
1895 | 1864 | ||
1896 | i = dma->dstk; u = dma->dstu; | 1865 | i = dma->dstk; u = dma->dstu; |
1897 | while (u != 0) { | 1866 | while (u != 0) { |
1898 | if (i == HIFN_D_DST_RSIZE) | ||
1899 | i = 0; | ||
1900 | if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID)) | 1867 | if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID)) |
1901 | break; | 1868 | break; |
1902 | i++, u--; | 1869 | if (++i == HIFN_D_DST_RSIZE) |
1870 | i = 0; | ||
1871 | u--; | ||
1903 | } | 1872 | } |
1904 | dma->dstk = i; dma->dstu = u; | 1873 | dma->dstk = i; dma->dstu = u; |
1905 | 1874 | ||
@@ -1944,30 +1913,39 @@ static void hifn_work(struct work_struct *work) | |||
1944 | } else | 1913 | } else |
1945 | dev->active--; | 1914 | dev->active--; |
1946 | 1915 | ||
1947 | if (dev->prev_success == dev->success && dev->started) | 1916 | if ((dev->prev_success == dev->success) && dev->started) |
1948 | reset = 1; | 1917 | reset = 1; |
1949 | dev->prev_success = dev->success; | 1918 | dev->prev_success = dev->success; |
1950 | spin_unlock_irqrestore(&dev->lock, flags); | 1919 | spin_unlock_irqrestore(&dev->lock, flags); |
1951 | 1920 | ||
1952 | if (reset) { | 1921 | if (reset) { |
1953 | dprintk("%s: r: %08x, active: %d, started: %d, " | ||
1954 | "success: %lu: reset: %d.\n", | ||
1955 | dev->name, r, dev->active, dev->started, | ||
1956 | dev->success, reset); | ||
1957 | |||
1958 | if (++dev->reset >= 5) { | 1922 | if (++dev->reset >= 5) { |
1959 | dprintk("%s: really hard reset.\n", dev->name); | 1923 | int i; |
1924 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | ||
1925 | |||
1926 | printk("%s: r: %08x, active: %d, started: %d, " | ||
1927 | "success: %lu: qlen: %u/%u, reset: %d.\n", | ||
1928 | dev->name, r, dev->active, dev->started, | ||
1929 | dev->success, dev->queue.qlen, dev->queue.max_qlen, | ||
1930 | reset); | ||
1931 | |||
1932 | printk("%s: res: ", __func__); | ||
1933 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) { | ||
1934 | printk("%x.%p ", dma->resr[i].l, dev->sa[i]); | ||
1935 | if (dev->sa[i]) { | ||
1936 | hifn_process_ready(dev->sa[i], -ENODEV); | ||
1937 | hifn_complete_sa(dev, i); | ||
1938 | } | ||
1939 | } | ||
1940 | printk("\n"); | ||
1941 | |||
1960 | hifn_reset_dma(dev, 1); | 1942 | hifn_reset_dma(dev, 1); |
1961 | hifn_stop_device(dev); | 1943 | hifn_stop_device(dev); |
1962 | hifn_start_device(dev); | 1944 | hifn_start_device(dev); |
1963 | dev->reset = 0; | 1945 | dev->reset = 0; |
1964 | } | 1946 | } |
1965 | 1947 | ||
1966 | spin_lock_irqsave(&dev->lock, flags); | 1948 | tasklet_schedule(&dev->tasklet); |
1967 | hifn_check_for_completion(dev, -EBUSY); | ||
1968 | hifn_clear_rings(dev); | ||
1969 | dev->started = 0; | ||
1970 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1971 | } | 1949 | } |
1972 | 1950 | ||
1973 | schedule_delayed_work(&dev->work, HZ); | 1951 | schedule_delayed_work(&dev->work, HZ); |
@@ -1984,8 +1962,8 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
1984 | dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " | 1962 | dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " |
1985 | "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", | 1963 | "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", |
1986 | dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, | 1964 | dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, |
1987 | dma->cmdu, dma->srcu, dma->dstu, dma->resu, | 1965 | dma->cmdi, dma->srci, dma->dsti, dma->resi, |
1988 | dma->cmdi, dma->srci, dma->dsti, dma->resi); | 1966 | dma->cmdu, dma->srcu, dma->dstu, dma->resu); |
1989 | 1967 | ||
1990 | if ((dmacsr & dev->dmareg) == 0) | 1968 | if ((dmacsr & dev->dmareg) == 0) |
1991 | return IRQ_NONE; | 1969 | return IRQ_NONE; |
@@ -2002,11 +1980,10 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
2002 | if (restart) { | 1980 | if (restart) { |
2003 | u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); | 1981 | u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); |
2004 | 1982 | ||
2005 | if (printk_ratelimit()) | 1983 | printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", |
2006 | printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", | 1984 | dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), |
2007 | dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), | 1985 | !!(dmacsr & HIFN_DMACSR_D_OVER), |
2008 | !!(dmacsr & HIFN_DMACSR_D_OVER), | 1986 | puisr, !!(puisr & HIFN_PUISR_DSTOVER)); |
2009 | puisr, !!(puisr & HIFN_PUISR_DSTOVER)); | ||
2010 | if (!!(puisr & HIFN_PUISR_DSTOVER)) | 1987 | if (!!(puisr & HIFN_PUISR_DSTOVER)) |
2011 | hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); | 1988 | hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); |
2012 | hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER | | 1989 | hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER | |
@@ -2016,12 +1993,11 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
2016 | restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | | 1993 | restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | |
2017 | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); | 1994 | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); |
2018 | if (restart) { | 1995 | if (restart) { |
2019 | if (printk_ratelimit()) | 1996 | printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n", |
2020 | printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n", | 1997 | dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), |
2021 | dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), | 1998 | !!(dmacsr & HIFN_DMACSR_S_ABORT), |
2022 | !!(dmacsr & HIFN_DMACSR_S_ABORT), | 1999 | !!(dmacsr & HIFN_DMACSR_D_ABORT), |
2023 | !!(dmacsr & HIFN_DMACSR_D_ABORT), | 2000 | !!(dmacsr & HIFN_DMACSR_R_ABORT)); |
2024 | !!(dmacsr & HIFN_DMACSR_R_ABORT)); | ||
2025 | hifn_reset_dma(dev, 1); | 2001 | hifn_reset_dma(dev, 1); |
2026 | hifn_init_dma(dev); | 2002 | hifn_init_dma(dev); |
2027 | hifn_init_registers(dev); | 2003 | hifn_init_registers(dev); |
@@ -2034,7 +2010,6 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
2034 | } | 2010 | } |
2035 | 2011 | ||
2036 | tasklet_schedule(&dev->tasklet); | 2012 | tasklet_schedule(&dev->tasklet); |
2037 | hifn_clear_rings(dev); | ||
2038 | 2013 | ||
2039 | return IRQ_HANDLED; | 2014 | return IRQ_HANDLED; |
2040 | } | 2015 | } |
@@ -2048,21 +2023,25 @@ static void hifn_flush(struct hifn_device *dev) | |||
2048 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 2023 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
2049 | int i; | 2024 | int i; |
2050 | 2025 | ||
2051 | spin_lock_irqsave(&dev->lock, flags); | ||
2052 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) { | 2026 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) { |
2053 | struct hifn_desc *d = &dma->resr[i]; | 2027 | struct hifn_desc *d = &dma->resr[i]; |
2054 | 2028 | ||
2055 | if (dev->sa[i]) { | 2029 | if (dev->sa[i]) { |
2056 | hifn_process_ready(dev->sa[i], | 2030 | hifn_process_ready(dev->sa[i], |
2057 | (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); | 2031 | (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); |
2032 | hifn_complete_sa(dev, i); | ||
2058 | } | 2033 | } |
2059 | } | 2034 | } |
2060 | 2035 | ||
2036 | spin_lock_irqsave(&dev->lock, flags); | ||
2061 | while ((async_req = crypto_dequeue_request(&dev->queue))) { | 2037 | while ((async_req = crypto_dequeue_request(&dev->queue))) { |
2062 | ctx = crypto_tfm_ctx(async_req->tfm); | 2038 | ctx = crypto_tfm_ctx(async_req->tfm); |
2063 | req = container_of(async_req, struct ablkcipher_request, base); | 2039 | req = container_of(async_req, struct ablkcipher_request, base); |
2040 | spin_unlock_irqrestore(&dev->lock, flags); | ||
2064 | 2041 | ||
2065 | hifn_process_ready(req, -ENODEV); | 2042 | hifn_process_ready(req, -ENODEV); |
2043 | |||
2044 | spin_lock_irqsave(&dev->lock, flags); | ||
2066 | } | 2045 | } |
2067 | spin_unlock_irqrestore(&dev->lock, flags); | 2046 | spin_unlock_irqrestore(&dev->lock, flags); |
2068 | } | 2047 | } |
@@ -2121,6 +2100,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, | |||
2121 | u8 type, u8 mode) | 2100 | u8 type, u8 mode) |
2122 | { | 2101 | { |
2123 | struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); | 2102 | struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); |
2103 | struct hifn_request_context *rctx = ablkcipher_request_ctx(req); | ||
2124 | unsigned ivsize; | 2104 | unsigned ivsize; |
2125 | 2105 | ||
2126 | ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); | 2106 | ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); |
@@ -2141,11 +2121,11 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, | |||
2141 | type = ACRYPTO_TYPE_AES_256; | 2121 | type = ACRYPTO_TYPE_AES_256; |
2142 | } | 2122 | } |
2143 | 2123 | ||
2144 | ctx->op = op; | 2124 | rctx->op = op; |
2145 | ctx->mode = mode; | 2125 | rctx->mode = mode; |
2146 | ctx->type = type; | 2126 | rctx->type = type; |
2147 | ctx->iv = req->info; | 2127 | rctx->iv = req->info; |
2148 | ctx->ivsize = ivsize; | 2128 | rctx->ivsize = ivsize; |
2149 | 2129 | ||
2150 | /* | 2130 | /* |
2151 | * HEAVY TODO: needs to kick Herbert XU to write documentation. | 2131 | * HEAVY TODO: needs to kick Herbert XU to write documentation. |
@@ -2158,7 +2138,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, | |||
2158 | 2138 | ||
2159 | static int hifn_process_queue(struct hifn_device *dev) | 2139 | static int hifn_process_queue(struct hifn_device *dev) |
2160 | { | 2140 | { |
2161 | struct crypto_async_request *async_req; | 2141 | struct crypto_async_request *async_req, *backlog; |
2162 | struct hifn_context *ctx; | 2142 | struct hifn_context *ctx; |
2163 | struct ablkcipher_request *req; | 2143 | struct ablkcipher_request *req; |
2164 | unsigned long flags; | 2144 | unsigned long flags; |
@@ -2166,12 +2146,16 @@ static int hifn_process_queue(struct hifn_device *dev) | |||
2166 | 2146 | ||
2167 | while (dev->started < HIFN_QUEUE_LENGTH) { | 2147 | while (dev->started < HIFN_QUEUE_LENGTH) { |
2168 | spin_lock_irqsave(&dev->lock, flags); | 2148 | spin_lock_irqsave(&dev->lock, flags); |
2149 | backlog = crypto_get_backlog(&dev->queue); | ||
2169 | async_req = crypto_dequeue_request(&dev->queue); | 2150 | async_req = crypto_dequeue_request(&dev->queue); |
2170 | spin_unlock_irqrestore(&dev->lock, flags); | 2151 | spin_unlock_irqrestore(&dev->lock, flags); |
2171 | 2152 | ||
2172 | if (!async_req) | 2153 | if (!async_req) |
2173 | break; | 2154 | break; |
2174 | 2155 | ||
2156 | if (backlog) | ||
2157 | backlog->complete(backlog, -EINPROGRESS); | ||
2158 | |||
2175 | ctx = crypto_tfm_ctx(async_req->tfm); | 2159 | ctx = crypto_tfm_ctx(async_req->tfm); |
2176 | req = container_of(async_req, struct ablkcipher_request, base); | 2160 | req = container_of(async_req, struct ablkcipher_request, base); |
2177 | 2161 | ||
@@ -2496,7 +2480,7 @@ static int hifn_cra_init(struct crypto_tfm *tfm) | |||
2496 | struct hifn_context *ctx = crypto_tfm_ctx(tfm); | 2480 | struct hifn_context *ctx = crypto_tfm_ctx(tfm); |
2497 | 2481 | ||
2498 | ctx->dev = ha->dev; | 2482 | ctx->dev = ha->dev; |
2499 | 2483 | tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context); | |
2500 | return 0; | 2484 | return 0; |
2501 | } | 2485 | } |
2502 | 2486 | ||
@@ -2574,7 +2558,10 @@ static void hifn_tasklet_callback(unsigned long data) | |||
2574 | * (like dev->success), but they are used in process | 2558 | * (like dev->success), but they are used in process |
2575 | * context or update is atomic (like setting dev->sa[i] to NULL). | 2559 | * context or update is atomic (like setting dev->sa[i] to NULL). |
2576 | */ | 2560 | */ |
2577 | hifn_check_for_completion(dev, 0); | 2561 | hifn_clear_rings(dev, 0); |
2562 | |||
2563 | if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) | ||
2564 | hifn_process_queue(dev); | ||
2578 | } | 2565 | } |
2579 | 2566 | ||
2580 | static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 2567 | static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
@@ -2631,22 +2618,11 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2631 | goto err_out_unmap_bars; | 2618 | goto err_out_unmap_bars; |
2632 | } | 2619 | } |
2633 | 2620 | ||
2634 | dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER); | ||
2635 | if (!dev->result_mem) { | ||
2636 | dprintk("Failed to allocate %d pages for result_mem.\n", | ||
2637 | HIFN_MAX_RESULT_ORDER); | ||
2638 | goto err_out_unmap_bars; | ||
2639 | } | ||
2640 | memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER)); | ||
2641 | |||
2642 | dev->dst = pci_map_single(pdev, (void *)dev->result_mem, | ||
2643 | PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE); | ||
2644 | |||
2645 | dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), | 2621 | dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), |
2646 | &dev->desc_dma); | 2622 | &dev->desc_dma); |
2647 | if (!dev->desc_virt) { | 2623 | if (!dev->desc_virt) { |
2648 | dprintk("Failed to allocate descriptor rings.\n"); | 2624 | dprintk("Failed to allocate descriptor rings.\n"); |
2649 | goto err_out_free_result_pages; | 2625 | goto err_out_unmap_bars; |
2650 | } | 2626 | } |
2651 | memset(dev->desc_virt, 0, sizeof(struct hifn_dma)); | 2627 | memset(dev->desc_virt, 0, sizeof(struct hifn_dma)); |
2652 | 2628 | ||
@@ -2706,11 +2682,6 @@ err_out_free_desc: | |||
2706 | pci_free_consistent(pdev, sizeof(struct hifn_dma), | 2682 | pci_free_consistent(pdev, sizeof(struct hifn_dma), |
2707 | dev->desc_virt, dev->desc_dma); | 2683 | dev->desc_virt, dev->desc_dma); |
2708 | 2684 | ||
2709 | err_out_free_result_pages: | ||
2710 | pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER, | ||
2711 | PCI_DMA_FROMDEVICE); | ||
2712 | free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER); | ||
2713 | |||
2714 | err_out_unmap_bars: | 2685 | err_out_unmap_bars: |
2715 | for (i=0; i<3; ++i) | 2686 | for (i=0; i<3; ++i) |
2716 | if (dev->bar[i]) | 2687 | if (dev->bar[i]) |
@@ -2748,10 +2719,6 @@ static void hifn_remove(struct pci_dev *pdev) | |||
2748 | 2719 | ||
2749 | pci_free_consistent(pdev, sizeof(struct hifn_dma), | 2720 | pci_free_consistent(pdev, sizeof(struct hifn_dma), |
2750 | dev->desc_virt, dev->desc_dma); | 2721 | dev->desc_virt, dev->desc_dma); |
2751 | pci_unmap_single(pdev, dev->dst, | ||
2752 | PAGE_SIZE << HIFN_MAX_RESULT_ORDER, | ||
2753 | PCI_DMA_FROMDEVICE); | ||
2754 | free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER); | ||
2755 | for (i=0; i<3; ++i) | 2722 | for (i=0; i<3; ++i) |
2756 | if (dev->bar[i]) | 2723 | if (dev->bar[i]) |
2757 | iounmap(dev->bar[i]); | 2724 | iounmap(dev->bar[i]); |
@@ -2782,6 +2749,11 @@ static int __devinit hifn_init(void) | |||
2782 | unsigned int freq; | 2749 | unsigned int freq; |
2783 | int err; | 2750 | int err; |
2784 | 2751 | ||
2752 | if (sizeof(dma_addr_t) > 4) { | ||
2753 | printk(KERN_INFO "HIFN supports only 32-bit addresses.\n"); | ||
2754 | return -EINVAL; | ||
2755 | } | ||
2756 | |||
2785 | if (strncmp(hifn_pll_ref, "ext", 3) && | 2757 | if (strncmp(hifn_pll_ref, "ext", 3) && |
2786 | strncmp(hifn_pll_ref, "pci", 3)) { | 2758 | strncmp(hifn_pll_ref, "pci", 3)) { |
2787 | printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " | 2759 | printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index bf2917d197a0..856b3cc25583 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/percpu.h> | ||
19 | #include <linux/smp.h> | ||
18 | #include <asm/byteorder.h> | 20 | #include <asm/byteorder.h> |
19 | #include <asm/i387.h> | 21 | #include <asm/i387.h> |
20 | #include "padlock.h" | 22 | #include "padlock.h" |
@@ -49,6 +51,8 @@ struct aes_ctx { | |||
49 | u32 *D; | 51 | u32 *D; |
50 | }; | 52 | }; |
51 | 53 | ||
54 | static DEFINE_PER_CPU(struct cword *, last_cword); | ||
55 | |||
52 | /* Tells whether the ACE is capable to generate | 56 | /* Tells whether the ACE is capable to generate |
53 | the extended key for a given key_len. */ | 57 | the extended key for a given key_len. */ |
54 | static inline int | 58 | static inline int |
@@ -89,6 +93,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
89 | const __le32 *key = (const __le32 *)in_key; | 93 | const __le32 *key = (const __le32 *)in_key; |
90 | u32 *flags = &tfm->crt_flags; | 94 | u32 *flags = &tfm->crt_flags; |
91 | struct crypto_aes_ctx gen_aes; | 95 | struct crypto_aes_ctx gen_aes; |
96 | int cpu; | ||
92 | 97 | ||
93 | if (key_len % 8) { | 98 | if (key_len % 8) { |
94 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 99 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
@@ -118,7 +123,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
118 | 123 | ||
119 | /* Don't generate extended keys if the hardware can do it. */ | 124 | /* Don't generate extended keys if the hardware can do it. */ |
120 | if (aes_hw_extkey_available(key_len)) | 125 | if (aes_hw_extkey_available(key_len)) |
121 | return 0; | 126 | goto ok; |
122 | 127 | ||
123 | ctx->D = ctx->d_data; | 128 | ctx->D = ctx->d_data; |
124 | ctx->cword.encrypt.keygen = 1; | 129 | ctx->cword.encrypt.keygen = 1; |
@@ -131,15 +136,30 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
131 | 136 | ||
132 | memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); | 137 | memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); |
133 | memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); | 138 | memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); |
139 | |||
140 | ok: | ||
141 | for_each_online_cpu(cpu) | ||
142 | if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) || | ||
143 | &ctx->cword.decrypt == per_cpu(last_cword, cpu)) | ||
144 | per_cpu(last_cword, cpu) = NULL; | ||
145 | |||
134 | return 0; | 146 | return 0; |
135 | } | 147 | } |
136 | 148 | ||
137 | /* ====== Encryption/decryption routines ====== */ | 149 | /* ====== Encryption/decryption routines ====== */ |
138 | 150 | ||
139 | /* These are the real call to PadLock. */ | 151 | /* These are the real call to PadLock. */ |
140 | static inline void padlock_reset_key(void) | 152 | static inline void padlock_reset_key(struct cword *cword) |
153 | { | ||
154 | int cpu = raw_smp_processor_id(); | ||
155 | |||
156 | if (cword != per_cpu(last_cword, cpu)) | ||
157 | asm volatile ("pushfl; popfl"); | ||
158 | } | ||
159 | |||
160 | static inline void padlock_store_cword(struct cword *cword) | ||
141 | { | 161 | { |
142 | asm volatile ("pushfl; popfl"); | 162 | per_cpu(last_cword, raw_smp_processor_id()) = cword; |
143 | } | 163 | } |
144 | 164 | ||
145 | /* | 165 | /* |
@@ -149,7 +169,7 @@ static inline void padlock_reset_key(void) | |||
149 | */ | 169 | */ |
150 | 170 | ||
151 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, | 171 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, |
152 | void *control_word) | 172 | struct cword *control_word) |
153 | { | 173 | { |
154 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ | 174 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
155 | : "+S"(input), "+D"(output) | 175 | : "+S"(input), "+D"(output) |
@@ -213,22 +233,24 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | |||
213 | { | 233 | { |
214 | struct aes_ctx *ctx = aes_ctx(tfm); | 234 | struct aes_ctx *ctx = aes_ctx(tfm); |
215 | int ts_state; | 235 | int ts_state; |
216 | padlock_reset_key(); | ||
217 | 236 | ||
237 | padlock_reset_key(&ctx->cword.encrypt); | ||
218 | ts_state = irq_ts_save(); | 238 | ts_state = irq_ts_save(); |
219 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); | 239 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); |
220 | irq_ts_restore(ts_state); | 240 | irq_ts_restore(ts_state); |
241 | padlock_store_cword(&ctx->cword.encrypt); | ||
221 | } | 242 | } |
222 | 243 | ||
223 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 244 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
224 | { | 245 | { |
225 | struct aes_ctx *ctx = aes_ctx(tfm); | 246 | struct aes_ctx *ctx = aes_ctx(tfm); |
226 | int ts_state; | 247 | int ts_state; |
227 | padlock_reset_key(); | ||
228 | 248 | ||
249 | padlock_reset_key(&ctx->cword.encrypt); | ||
229 | ts_state = irq_ts_save(); | 250 | ts_state = irq_ts_save(); |
230 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); | 251 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); |
231 | irq_ts_restore(ts_state); | 252 | irq_ts_restore(ts_state); |
253 | padlock_store_cword(&ctx->cword.encrypt); | ||
232 | } | 254 | } |
233 | 255 | ||
234 | static struct crypto_alg aes_alg = { | 256 | static struct crypto_alg aes_alg = { |
@@ -261,7 +283,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, | |||
261 | int err; | 283 | int err; |
262 | int ts_state; | 284 | int ts_state; |
263 | 285 | ||
264 | padlock_reset_key(); | 286 | padlock_reset_key(&ctx->cword.encrypt); |
265 | 287 | ||
266 | blkcipher_walk_init(&walk, dst, src, nbytes); | 288 | blkcipher_walk_init(&walk, dst, src, nbytes); |
267 | err = blkcipher_walk_virt(desc, &walk); | 289 | err = blkcipher_walk_virt(desc, &walk); |
@@ -276,6 +298,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, | |||
276 | } | 298 | } |
277 | irq_ts_restore(ts_state); | 299 | irq_ts_restore(ts_state); |
278 | 300 | ||
301 | padlock_store_cword(&ctx->cword.encrypt); | ||
302 | |||
279 | return err; | 303 | return err; |
280 | } | 304 | } |
281 | 305 | ||
@@ -288,7 +312,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, | |||
288 | int err; | 312 | int err; |
289 | int ts_state; | 313 | int ts_state; |
290 | 314 | ||
291 | padlock_reset_key(); | 315 | padlock_reset_key(&ctx->cword.decrypt); |
292 | 316 | ||
293 | blkcipher_walk_init(&walk, dst, src, nbytes); | 317 | blkcipher_walk_init(&walk, dst, src, nbytes); |
294 | err = blkcipher_walk_virt(desc, &walk); | 318 | err = blkcipher_walk_virt(desc, &walk); |
@@ -302,6 +326,9 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, | |||
302 | err = blkcipher_walk_done(desc, &walk, nbytes); | 326 | err = blkcipher_walk_done(desc, &walk, nbytes); |
303 | } | 327 | } |
304 | irq_ts_restore(ts_state); | 328 | irq_ts_restore(ts_state); |
329 | |||
330 | padlock_store_cword(&ctx->cword.encrypt); | ||
331 | |||
305 | return err; | 332 | return err; |
306 | } | 333 | } |
307 | 334 | ||
@@ -336,7 +363,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, | |||
336 | int err; | 363 | int err; |
337 | int ts_state; | 364 | int ts_state; |
338 | 365 | ||
339 | padlock_reset_key(); | 366 | padlock_reset_key(&ctx->cword.encrypt); |
340 | 367 | ||
341 | blkcipher_walk_init(&walk, dst, src, nbytes); | 368 | blkcipher_walk_init(&walk, dst, src, nbytes); |
342 | err = blkcipher_walk_virt(desc, &walk); | 369 | err = blkcipher_walk_virt(desc, &walk); |
@@ -353,6 +380,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, | |||
353 | } | 380 | } |
354 | irq_ts_restore(ts_state); | 381 | irq_ts_restore(ts_state); |
355 | 382 | ||
383 | padlock_store_cword(&ctx->cword.decrypt); | ||
384 | |||
356 | return err; | 385 | return err; |
357 | } | 386 | } |
358 | 387 | ||
@@ -365,7 +394,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, | |||
365 | int err; | 394 | int err; |
366 | int ts_state; | 395 | int ts_state; |
367 | 396 | ||
368 | padlock_reset_key(); | 397 | padlock_reset_key(&ctx->cword.encrypt); |
369 | 398 | ||
370 | blkcipher_walk_init(&walk, dst, src, nbytes); | 399 | blkcipher_walk_init(&walk, dst, src, nbytes); |
371 | err = blkcipher_walk_virt(desc, &walk); | 400 | err = blkcipher_walk_virt(desc, &walk); |
@@ -380,6 +409,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, | |||
380 | } | 409 | } |
381 | 410 | ||
382 | irq_ts_restore(ts_state); | 411 | irq_ts_restore(ts_state); |
412 | |||
413 | padlock_store_cword(&ctx->cword.encrypt); | ||
414 | |||
383 | return err; | 415 | return err; |
384 | } | 416 | } |
385 | 417 | ||
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 24607669a52b..a3918c16b3db 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -127,7 +127,6 @@ struct talitos_private { | |||
127 | 127 | ||
128 | /* request callback tasklet */ | 128 | /* request callback tasklet */ |
129 | struct tasklet_struct done_task; | 129 | struct tasklet_struct done_task; |
130 | struct tasklet_struct error_task; | ||
131 | 130 | ||
132 | /* list of registered algorithms */ | 131 | /* list of registered algorithms */ |
133 | struct list_head alg_list; | 132 | struct list_head alg_list; |
@@ -138,6 +137,7 @@ struct talitos_private { | |||
138 | 137 | ||
139 | /* .features flag */ | 138 | /* .features flag */ |
140 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | 139 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
140 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | ||
141 | 141 | ||
142 | /* | 142 | /* |
143 | * map virtual single (contiguous) pointer to h/w descriptor pointer | 143 | * map virtual single (contiguous) pointer to h/w descriptor pointer |
@@ -184,6 +184,11 @@ static int reset_channel(struct device *dev, int ch) | |||
184 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | | 184 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | |
185 | TALITOS_CCCR_LO_CDIE); | 185 | TALITOS_CCCR_LO_CDIE); |
186 | 186 | ||
187 | /* and ICCR writeback, if available */ | ||
188 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) | ||
189 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), | ||
190 | TALITOS_CCCR_LO_IWSE); | ||
191 | |||
187 | return 0; | 192 | return 0; |
188 | } | 193 | } |
189 | 194 | ||
@@ -239,6 +244,11 @@ static int init_device(struct device *dev) | |||
239 | setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); | 244 | setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); |
240 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); | 245 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); |
241 | 246 | ||
247 | /* disable integrity check error interrupts (use writeback instead) */ | ||
248 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) | ||
249 | setbits32(priv->reg + TALITOS_MDEUICR_LO, | ||
250 | TALITOS_MDEUICR_LO_ICE); | ||
251 | |||
242 | return 0; | 252 | return 0; |
243 | } | 253 | } |
244 | 254 | ||
@@ -370,6 +380,12 @@ static void talitos_done(unsigned long data) | |||
370 | 380 | ||
371 | for (ch = 0; ch < priv->num_channels; ch++) | 381 | for (ch = 0; ch < priv->num_channels; ch++) |
372 | flush_channel(dev, ch, 0, 0); | 382 | flush_channel(dev, ch, 0, 0); |
383 | |||
384 | /* At this point, all completed channels have been processed. | ||
385 | * Unmask done interrupts for channels completed later on. | ||
386 | */ | ||
387 | setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); | ||
388 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); | ||
373 | } | 389 | } |
374 | 390 | ||
375 | /* | 391 | /* |
@@ -469,16 +485,13 @@ static void report_eu_error(struct device *dev, int ch, struct talitos_desc *des | |||
469 | /* | 485 | /* |
470 | * recover from error interrupts | 486 | * recover from error interrupts |
471 | */ | 487 | */ |
472 | static void talitos_error(unsigned long data) | 488 | static void talitos_error(unsigned long data, u32 isr, u32 isr_lo) |
473 | { | 489 | { |
474 | struct device *dev = (struct device *)data; | 490 | struct device *dev = (struct device *)data; |
475 | struct talitos_private *priv = dev_get_drvdata(dev); | 491 | struct talitos_private *priv = dev_get_drvdata(dev); |
476 | unsigned int timeout = TALITOS_TIMEOUT; | 492 | unsigned int timeout = TALITOS_TIMEOUT; |
477 | int ch, error, reset_dev = 0, reset_ch = 0; | 493 | int ch, error, reset_dev = 0, reset_ch = 0; |
478 | u32 isr, isr_lo, v, v_lo; | 494 | u32 v, v_lo; |
479 | |||
480 | isr = in_be32(priv->reg + TALITOS_ISR); | ||
481 | isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); | ||
482 | 495 | ||
483 | for (ch = 0; ch < priv->num_channels; ch++) { | 496 | for (ch = 0; ch < priv->num_channels; ch++) { |
484 | /* skip channels without errors */ | 497 | /* skip channels without errors */ |
@@ -560,16 +573,19 @@ static irqreturn_t talitos_interrupt(int irq, void *data) | |||
560 | 573 | ||
561 | isr = in_be32(priv->reg + TALITOS_ISR); | 574 | isr = in_be32(priv->reg + TALITOS_ISR); |
562 | isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); | 575 | isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); |
563 | 576 | /* Acknowledge interrupt */ | |
564 | /* ack */ | ||
565 | out_be32(priv->reg + TALITOS_ICR, isr); | 577 | out_be32(priv->reg + TALITOS_ICR, isr); |
566 | out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); | 578 | out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); |
567 | 579 | ||
568 | if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo)) | 580 | if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo)) |
569 | talitos_error((unsigned long)data); | 581 | talitos_error((unsigned long)data, isr, isr_lo); |
570 | else | 582 | else |
571 | if (likely(isr & TALITOS_ISR_CHDONE)) | 583 | if (likely(isr & TALITOS_ISR_CHDONE)) { |
584 | /* mask further done interrupts. */ | ||
585 | clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE); | ||
586 | /* done_task will unmask done interrupts at exit */ | ||
572 | tasklet_schedule(&priv->done_task); | 587 | tasklet_schedule(&priv->done_task); |
588 | } | ||
573 | 589 | ||
574 | return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE; | 590 | return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE; |
575 | } | 591 | } |
@@ -802,7 +818,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
802 | aead_request_complete(areq, err); | 818 | aead_request_complete(areq, err); |
803 | } | 819 | } |
804 | 820 | ||
805 | static void ipsec_esp_decrypt_done(struct device *dev, | 821 | static void ipsec_esp_decrypt_swauth_done(struct device *dev, |
806 | struct talitos_desc *desc, void *context, | 822 | struct talitos_desc *desc, void *context, |
807 | int err) | 823 | int err) |
808 | { | 824 | { |
@@ -834,6 +850,27 @@ static void ipsec_esp_decrypt_done(struct device *dev, | |||
834 | aead_request_complete(req, err); | 850 | aead_request_complete(req, err); |
835 | } | 851 | } |
836 | 852 | ||
853 | static void ipsec_esp_decrypt_hwauth_done(struct device *dev, | ||
854 | struct talitos_desc *desc, void *context, | ||
855 | int err) | ||
856 | { | ||
857 | struct aead_request *req = context; | ||
858 | struct ipsec_esp_edesc *edesc = | ||
859 | container_of(desc, struct ipsec_esp_edesc, desc); | ||
860 | |||
861 | ipsec_esp_unmap(dev, edesc, req); | ||
862 | |||
863 | /* check ICV auth status */ | ||
864 | if (!err) | ||
865 | if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != | ||
866 | DESC_HDR_LO_ICCR1_PASS) | ||
867 | err = -EBADMSG; | ||
868 | |||
869 | kfree(edesc); | ||
870 | |||
871 | aead_request_complete(req, err); | ||
872 | } | ||
873 | |||
837 | /* | 874 | /* |
838 | * convert scatterlist to SEC h/w link table format | 875 | * convert scatterlist to SEC h/w link table format |
839 | * stop at cryptlen bytes | 876 | * stop at cryptlen bytes |
@@ -887,6 +924,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
887 | unsigned int authsize = ctx->authsize; | 924 | unsigned int authsize = ctx->authsize; |
888 | unsigned int ivsize; | 925 | unsigned int ivsize; |
889 | int sg_count, ret; | 926 | int sg_count, ret; |
927 | int sg_link_tbl_len; | ||
890 | 928 | ||
891 | /* hmac key */ | 929 | /* hmac key */ |
892 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 930 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, |
@@ -924,33 +962,19 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
924 | if (sg_count == 1) { | 962 | if (sg_count == 1) { |
925 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 963 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); |
926 | } else { | 964 | } else { |
927 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | 965 | sg_link_tbl_len = cryptlen; |
966 | |||
967 | if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && | ||
968 | (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { | ||
969 | sg_link_tbl_len = cryptlen + authsize; | ||
970 | } | ||
971 | sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, | ||
928 | &edesc->link_tbl[0]); | 972 | &edesc->link_tbl[0]); |
929 | if (sg_count > 1) { | 973 | if (sg_count > 1) { |
930 | struct talitos_ptr *link_tbl_ptr = | ||
931 | &edesc->link_tbl[sg_count-1]; | ||
932 | struct scatterlist *sg; | ||
933 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
934 | |||
935 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 974 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
936 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); | 975 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); |
937 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 976 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
938 | edesc->dma_len, DMA_BIDIRECTIONAL); | 977 | edesc->dma_len, DMA_BIDIRECTIONAL); |
939 | /* If necessary for this SEC revision, | ||
940 | * add a link table entry for ICV. | ||
941 | */ | ||
942 | if ((priv->features & | ||
943 | TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) && | ||
944 | (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { | ||
945 | link_tbl_ptr->j_extent = 0; | ||
946 | link_tbl_ptr++; | ||
947 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | ||
948 | link_tbl_ptr->len = cpu_to_be16(authsize); | ||
949 | sg = sg_last(areq->src, edesc->src_nents ? : 1); | ||
950 | link_tbl_ptr->ptr = cpu_to_be32( | ||
951 | (char *)sg_dma_address(sg) | ||
952 | + sg->length - authsize); | ||
953 | } | ||
954 | } else { | 978 | } else { |
955 | /* Only one segment now, so no link tbl needed */ | 979 | /* Only one segment now, so no link tbl needed */ |
956 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 980 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); |
@@ -975,13 +999,9 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
975 | desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) | 999 | desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) |
976 | edesc->dma_link_tbl + | 1000 | edesc->dma_link_tbl + |
977 | edesc->src_nents + 1); | 1001 | edesc->src_nents + 1); |
978 | if (areq->src == areq->dst) { | 1002 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
979 | memcpy(link_tbl_ptr, &edesc->link_tbl[0], | 1003 | link_tbl_ptr); |
980 | edesc->src_nents * sizeof(struct talitos_ptr)); | 1004 | |
981 | } else { | ||
982 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | ||
983 | link_tbl_ptr); | ||
984 | } | ||
985 | /* Add an entry to the link table for ICV data */ | 1005 | /* Add an entry to the link table for ICV data */ |
986 | link_tbl_ptr += sg_count - 1; | 1006 | link_tbl_ptr += sg_count - 1; |
987 | link_tbl_ptr->j_extent = 0; | 1007 | link_tbl_ptr->j_extent = 0; |
@@ -1106,11 +1126,14 @@ static int aead_authenc_encrypt(struct aead_request *req) | |||
1106 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); | 1126 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); |
1107 | } | 1127 | } |
1108 | 1128 | ||
1129 | |||
1130 | |||
1109 | static int aead_authenc_decrypt(struct aead_request *req) | 1131 | static int aead_authenc_decrypt(struct aead_request *req) |
1110 | { | 1132 | { |
1111 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1133 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
1112 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1134 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1113 | unsigned int authsize = ctx->authsize; | 1135 | unsigned int authsize = ctx->authsize; |
1136 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); | ||
1114 | struct ipsec_esp_edesc *edesc; | 1137 | struct ipsec_esp_edesc *edesc; |
1115 | struct scatterlist *sg; | 1138 | struct scatterlist *sg; |
1116 | void *icvdata; | 1139 | void *icvdata; |
@@ -1122,22 +1145,39 @@ static int aead_authenc_decrypt(struct aead_request *req) | |||
1122 | if (IS_ERR(edesc)) | 1145 | if (IS_ERR(edesc)) |
1123 | return PTR_ERR(edesc); | 1146 | return PTR_ERR(edesc); |
1124 | 1147 | ||
1125 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1148 | if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && |
1126 | if (edesc->dma_len) | 1149 | (((!edesc->src_nents && !edesc->dst_nents) || |
1127 | icvdata = &edesc->link_tbl[edesc->src_nents + | 1150 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) { |
1128 | edesc->dst_nents + 2]; | 1151 | |
1129 | else | 1152 | /* decrypt and check the ICV */ |
1130 | icvdata = &edesc->link_tbl[0]; | 1153 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND | |
1154 | DESC_HDR_MODE1_MDEU_CICV; | ||
1155 | |||
1156 | /* reset integrity check result bits */ | ||
1157 | edesc->desc.hdr_lo = 0; | ||
1158 | |||
1159 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done); | ||
1160 | |||
1161 | } else { | ||
1162 | |||
1163 | /* Have to check the ICV with software */ | ||
1131 | 1164 | ||
1132 | sg = sg_last(req->src, edesc->src_nents ? : 1); | 1165 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; |
1166 | |||
1167 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | ||
1168 | if (edesc->dma_len) | ||
1169 | icvdata = &edesc->link_tbl[edesc->src_nents + | ||
1170 | edesc->dst_nents + 2]; | ||
1171 | else | ||
1172 | icvdata = &edesc->link_tbl[0]; | ||
1133 | 1173 | ||
1134 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, | 1174 | sg = sg_last(req->src, edesc->src_nents ? : 1); |
1135 | ctx->authsize); | ||
1136 | 1175 | ||
1137 | /* decrypt */ | 1176 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, |
1138 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | 1177 | ctx->authsize); |
1139 | 1178 | ||
1140 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done); | 1179 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); |
1180 | } | ||
1141 | } | 1181 | } |
1142 | 1182 | ||
1143 | static int aead_authenc_givencrypt( | 1183 | static int aead_authenc_givencrypt( |
@@ -1391,7 +1431,6 @@ static int talitos_remove(struct of_device *ofdev) | |||
1391 | } | 1431 | } |
1392 | 1432 | ||
1393 | tasklet_kill(&priv->done_task); | 1433 | tasklet_kill(&priv->done_task); |
1394 | tasklet_kill(&priv->error_task); | ||
1395 | 1434 | ||
1396 | iounmap(priv->reg); | 1435 | iounmap(priv->reg); |
1397 | 1436 | ||
@@ -1451,10 +1490,9 @@ static int talitos_probe(struct of_device *ofdev, | |||
1451 | 1490 | ||
1452 | priv->ofdev = ofdev; | 1491 | priv->ofdev = ofdev; |
1453 | 1492 | ||
1454 | INIT_LIST_HEAD(&priv->alg_list); | ||
1455 | |||
1456 | tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev); | 1493 | tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev); |
1457 | tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev); | 1494 | |
1495 | INIT_LIST_HEAD(&priv->alg_list); | ||
1458 | 1496 | ||
1459 | priv->irq = irq_of_parse_and_map(np, 0); | 1497 | priv->irq = irq_of_parse_and_map(np, 0); |
1460 | 1498 | ||
@@ -1508,6 +1546,9 @@ static int talitos_probe(struct of_device *ofdev, | |||
1508 | if (of_device_is_compatible(np, "fsl,sec3.0")) | 1546 | if (of_device_is_compatible(np, "fsl,sec3.0")) |
1509 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; | 1547 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; |
1510 | 1548 | ||
1549 | if (of_device_is_compatible(np, "fsl,sec2.1")) | ||
1550 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; | ||
1551 | |||
1511 | priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1552 | priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, |
1512 | GFP_KERNEL); | 1553 | GFP_KERNEL); |
1513 | priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1554 | priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, |
@@ -1551,7 +1592,7 @@ static int talitos_probe(struct of_device *ofdev, | |||
1551 | goto err_out; | 1592 | goto err_out; |
1552 | } | 1593 | } |
1553 | for (i = 0; i < priv->num_channels; i++) | 1594 | for (i = 0; i < priv->num_channels; i++) |
1554 | atomic_set(&priv->submit_count[i], -priv->chfifo_len); | 1595 | atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); |
1555 | 1596 | ||
1556 | priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); | 1597 | priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); |
1557 | priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); | 1598 | priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index c48a405abf70..575981f0cfda 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -37,7 +37,8 @@ | |||
37 | #define TALITOS_MCR_LO 0x1038 | 37 | #define TALITOS_MCR_LO 0x1038 |
38 | #define TALITOS_MCR_SWR 0x1 /* s/w reset */ | 38 | #define TALITOS_MCR_SWR 0x1 /* s/w reset */ |
39 | #define TALITOS_IMR 0x1008 /* interrupt mask register */ | 39 | #define TALITOS_IMR 0x1008 /* interrupt mask register */ |
40 | #define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */ | 40 | #define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ |
41 | #define TALITOS_IMR_DONE 0x00055 /* done IRQs */ | ||
41 | #define TALITOS_IMR_LO 0x100C | 42 | #define TALITOS_IMR_LO 0x100C |
42 | #define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ | 43 | #define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ |
43 | #define TALITOS_ISR 0x1010 /* interrupt status register */ | 44 | #define TALITOS_ISR 0x1010 /* interrupt status register */ |
@@ -55,6 +56,7 @@ | |||
55 | #define TALITOS_CCCR_CONT 0x2 /* channel continue */ | 56 | #define TALITOS_CCCR_CONT 0x2 /* channel continue */ |
56 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ | 57 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ |
57 | #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) | 58 | #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) |
59 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ | ||
58 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ | 60 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ |
59 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ | 61 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ |
60 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ | 62 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ |
@@ -102,6 +104,9 @@ | |||
102 | #define TALITOS_AESUISR_LO 0x4034 | 104 | #define TALITOS_AESUISR_LO 0x4034 |
103 | #define TALITOS_MDEUISR 0x6030 /* message digest unit */ | 105 | #define TALITOS_MDEUISR 0x6030 /* message digest unit */ |
104 | #define TALITOS_MDEUISR_LO 0x6034 | 106 | #define TALITOS_MDEUISR_LO 0x6034 |
107 | #define TALITOS_MDEUICR 0x6038 /* interrupt control */ | ||
108 | #define TALITOS_MDEUICR_LO 0x603c | ||
109 | #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ | ||
105 | #define TALITOS_AFEUISR 0x8030 /* arc4 unit */ | 110 | #define TALITOS_AFEUISR 0x8030 /* arc4 unit */ |
106 | #define TALITOS_AFEUISR_LO 0x8034 | 111 | #define TALITOS_AFEUISR_LO 0x8034 |
107 | #define TALITOS_RNGUISR 0xa030 /* random number unit */ | 112 | #define TALITOS_RNGUISR 0xa030 /* random number unit */ |
@@ -129,31 +134,34 @@ | |||
129 | */ | 134 | */ |
130 | 135 | ||
131 | /* written back when done */ | 136 | /* written back when done */ |
132 | #define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000) | 137 | #define DESC_HDR_DONE cpu_to_be32(0xff000000) |
138 | #define DESC_HDR_LO_ICCR1_MASK cpu_to_be32(0x00180000) | ||
139 | #define DESC_HDR_LO_ICCR1_PASS cpu_to_be32(0x00080000) | ||
140 | #define DESC_HDR_LO_ICCR1_FAIL cpu_to_be32(0x00100000) | ||
133 | 141 | ||
134 | /* primary execution unit select */ | 142 | /* primary execution unit select */ |
135 | #define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000) | 143 | #define DESC_HDR_SEL0_MASK cpu_to_be32(0xf0000000) |
136 | #define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000) | 144 | #define DESC_HDR_SEL0_AFEU cpu_to_be32(0x10000000) |
137 | #define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000) | 145 | #define DESC_HDR_SEL0_DEU cpu_to_be32(0x20000000) |
138 | #define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000) | 146 | #define DESC_HDR_SEL0_MDEUA cpu_to_be32(0x30000000) |
139 | #define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000) | 147 | #define DESC_HDR_SEL0_MDEUB cpu_to_be32(0xb0000000) |
140 | #define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000) | 148 | #define DESC_HDR_SEL0_RNG cpu_to_be32(0x40000000) |
141 | #define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000) | 149 | #define DESC_HDR_SEL0_PKEU cpu_to_be32(0x50000000) |
142 | #define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000) | 150 | #define DESC_HDR_SEL0_AESU cpu_to_be32(0x60000000) |
143 | #define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000) | 151 | #define DESC_HDR_SEL0_KEU cpu_to_be32(0x70000000) |
144 | #define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000) | 152 | #define DESC_HDR_SEL0_CRCU cpu_to_be32(0x80000000) |
145 | 153 | ||
146 | /* primary execution unit mode (MODE0) and derivatives */ | 154 | /* primary execution unit mode (MODE0) and derivatives */ |
147 | #define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000) | 155 | #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) |
148 | #define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000) | 156 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) |
149 | #define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000) | 157 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) |
150 | #define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000) | 158 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) |
151 | #define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000) | 159 | #define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) |
152 | #define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000) | 160 | #define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) |
153 | #define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000) | 161 | #define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) |
154 | #define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000) | 162 | #define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) |
155 | #define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000) | 163 | #define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) |
156 | #define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000) | 164 | #define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) |
157 | #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ | 165 | #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ |
158 | DESC_HDR_MODE0_MDEU_HMAC) | 166 | DESC_HDR_MODE0_MDEU_HMAC) |
159 | #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ | 167 | #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ |
@@ -162,18 +170,19 @@ | |||
162 | DESC_HDR_MODE0_MDEU_HMAC) | 170 | DESC_HDR_MODE0_MDEU_HMAC) |
163 | 171 | ||
164 | /* secondary execution unit select (SEL1) */ | 172 | /* secondary execution unit select (SEL1) */ |
165 | #define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000) | 173 | #define DESC_HDR_SEL1_MASK cpu_to_be32(0x000f0000) |
166 | #define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000) | 174 | #define DESC_HDR_SEL1_MDEUA cpu_to_be32(0x00030000) |
167 | #define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000) | 175 | #define DESC_HDR_SEL1_MDEUB cpu_to_be32(0x000b0000) |
168 | #define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000) | 176 | #define DESC_HDR_SEL1_CRCU cpu_to_be32(0x00080000) |
169 | 177 | ||
170 | /* secondary execution unit mode (MODE1) and derivatives */ | 178 | /* secondary execution unit mode (MODE1) and derivatives */ |
171 | #define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000) | 179 | #define DESC_HDR_MODE1_MDEU_CICV cpu_to_be32(0x00004000) |
172 | #define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800) | 180 | #define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) |
173 | #define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400) | 181 | #define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) |
174 | #define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200) | 182 | #define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) |
175 | #define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100) | 183 | #define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) |
176 | #define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000) | 184 | #define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) |
185 | #define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) | ||
177 | #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ | 186 | #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ |
178 | DESC_HDR_MODE1_MDEU_HMAC) | 187 | DESC_HDR_MODE1_MDEU_HMAC) |
179 | #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ | 188 | #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ |
@@ -182,16 +191,16 @@ | |||
182 | DESC_HDR_MODE1_MDEU_HMAC) | 191 | DESC_HDR_MODE1_MDEU_HMAC) |
183 | 192 | ||
184 | /* direction of overall data flow (DIR) */ | 193 | /* direction of overall data flow (DIR) */ |
185 | #define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002) | 194 | #define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) |
186 | 195 | ||
187 | /* request done notification (DN) */ | 196 | /* request done notification (DN) */ |
188 | #define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001) | 197 | #define DESC_HDR_DONE_NOTIFY cpu_to_be32(0x00000001) |
189 | 198 | ||
190 | /* descriptor types */ | 199 | /* descriptor types */ |
191 | #define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3) | 200 | #define DESC_HDR_TYPE_AESU_CTR_NONSNOOP cpu_to_be32(0 << 3) |
192 | #define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3) | 201 | #define DESC_HDR_TYPE_IPSEC_ESP cpu_to_be32(1 << 3) |
193 | #define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3) | 202 | #define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU cpu_to_be32(2 << 3) |
194 | #define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3) | 203 | #define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU cpu_to_be32(4 << 3) |
195 | 204 | ||
196 | /* link table extent field bits */ | 205 | /* link table extent field bits */ |
197 | #define DESC_PTR_LNKTBL_JUMP 0x80 | 206 | #define DESC_PTR_LNKTBL_JUMP 0x80 |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 5317e08221ec..657996517374 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device) | |||
388 | 388 | ||
389 | init_completion(&device->done); | 389 | init_completion(&device->done); |
390 | kref_init(&device->refcount); | 390 | kref_init(&device->refcount); |
391 | |||
392 | mutex_lock(&dma_list_mutex); | ||
391 | device->dev_id = id++; | 393 | device->dev_id = id++; |
394 | mutex_unlock(&dma_list_mutex); | ||
392 | 395 | ||
393 | /* represent channels in sysfs. Probably want devs too */ | 396 | /* represent channels in sysfs. Probably want devs too */ |
394 | list_for_each_entry(chan, &device->channels, device_node) { | 397 | list_for_each_entry(chan, &device->channels, device_node) { |
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index ecd743f7cc61..6607fdd00b1c 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -1341,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | |||
1341 | */ | 1341 | */ |
1342 | #define IOAT_TEST_SIZE 2000 | 1342 | #define IOAT_TEST_SIZE 2000 |
1343 | 1343 | ||
1344 | DECLARE_COMPLETION(test_completion); | ||
1344 | static void ioat_dma_test_callback(void *dma_async_param) | 1345 | static void ioat_dma_test_callback(void *dma_async_param) |
1345 | { | 1346 | { |
1346 | printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", | 1347 | printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", |
1347 | dma_async_param); | 1348 | dma_async_param); |
1349 | complete(&test_completion); | ||
1348 | } | 1350 | } |
1349 | 1351 | ||
1350 | /** | 1352 | /** |
@@ -1410,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1410 | goto free_resources; | 1412 | goto free_resources; |
1411 | } | 1413 | } |
1412 | device->common.device_issue_pending(dma_chan); | 1414 | device->common.device_issue_pending(dma_chan); |
1413 | msleep(1); | 1415 | |
1416 | wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000)); | ||
1414 | 1417 | ||
1415 | if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) | 1418 | if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) |
1416 | != DMA_SUCCESS) { | 1419 | != DMA_SUCCESS) { |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index c7a9306d951d..6be317262200 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
85 | enum dma_ctrl_flags flags = desc->async_tx.flags; | 85 | enum dma_ctrl_flags flags = desc->async_tx.flags; |
86 | u32 src_cnt; | 86 | u32 src_cnt; |
87 | dma_addr_t addr; | 87 | dma_addr_t addr; |
88 | dma_addr_t dest; | ||
88 | 89 | ||
90 | src_cnt = unmap->unmap_src_cnt; | ||
91 | dest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
89 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 92 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
90 | addr = iop_desc_get_dest_addr(unmap, iop_chan); | 93 | enum dma_data_direction dir; |
91 | dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); | 94 | |
95 | if (src_cnt > 1) /* is xor? */ | ||
96 | dir = DMA_BIDIRECTIONAL; | ||
97 | else | ||
98 | dir = DMA_FROM_DEVICE; | ||
99 | |||
100 | dma_unmap_page(dev, dest, len, dir); | ||
92 | } | 101 | } |
93 | 102 | ||
94 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 103 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
95 | src_cnt = unmap->unmap_src_cnt; | ||
96 | while (src_cnt--) { | 104 | while (src_cnt--) { |
97 | addr = iop_desc_get_src_addr(unmap, | 105 | addr = iop_desc_get_src_addr(unmap, |
98 | iop_chan, | 106 | iop_chan, |
99 | src_cnt); | 107 | src_cnt); |
108 | if (addr == dest) | ||
109 | continue; | ||
100 | dma_unmap_page(dev, addr, len, | 110 | dma_unmap_page(dev, addr, len, |
101 | DMA_TO_DEVICE); | 111 | DMA_TO_DEVICE); |
102 | } | 112 | } |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 0328da020a10..bcda17426411 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
311 | enum dma_ctrl_flags flags = desc->async_tx.flags; | 311 | enum dma_ctrl_flags flags = desc->async_tx.flags; |
312 | u32 src_cnt; | 312 | u32 src_cnt; |
313 | dma_addr_t addr; | 313 | dma_addr_t addr; |
314 | dma_addr_t dest; | ||
314 | 315 | ||
316 | src_cnt = unmap->unmap_src_cnt; | ||
317 | dest = mv_desc_get_dest_addr(unmap); | ||
315 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 318 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
316 | addr = mv_desc_get_dest_addr(unmap); | 319 | enum dma_data_direction dir; |
317 | dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); | 320 | |
321 | if (src_cnt > 1) /* is xor ? */ | ||
322 | dir = DMA_BIDIRECTIONAL; | ||
323 | else | ||
324 | dir = DMA_FROM_DEVICE; | ||
325 | dma_unmap_page(dev, dest, len, dir); | ||
318 | } | 326 | } |
319 | 327 | ||
320 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 328 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
321 | src_cnt = unmap->unmap_src_cnt; | ||
322 | while (src_cnt--) { | 329 | while (src_cnt--) { |
323 | addr = mv_desc_get_src_addr(unmap, | 330 | addr = mv_desc_get_src_addr(unmap, |
324 | src_cnt); | 331 | src_cnt); |
332 | if (addr == dest) | ||
333 | continue; | ||
325 | dma_unmap_page(dev, addr, len, | 334 | dma_unmap_page(dev, addr, len, |
326 | DMA_TO_DEVICE); | 335 | DMA_TO_DEVICE); |
327 | } | 336 | } |
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 5fcd3d89c75d..4041e9143283 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -394,6 +394,12 @@ static void edac_device_workq_function(struct work_struct *work_req) | |||
394 | 394 | ||
395 | mutex_lock(&device_ctls_mutex); | 395 | mutex_lock(&device_ctls_mutex); |
396 | 396 | ||
397 | /* If we are being removed, bail out immediately */ | ||
398 | if (edac_dev->op_state == OP_OFFLINE) { | ||
399 | mutex_unlock(&device_ctls_mutex); | ||
400 | return; | ||
401 | } | ||
402 | |||
397 | /* Only poll controllers that are running polled and have a check */ | 403 | /* Only poll controllers that are running polled and have a check */ |
398 | if ((edac_dev->op_state == OP_RUNNING_POLL) && | 404 | if ((edac_dev->op_state == OP_RUNNING_POLL) && |
399 | (edac_dev->edac_check != NULL)) { | 405 | (edac_dev->edac_check != NULL)) { |
@@ -585,14 +591,14 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev) | |||
585 | /* mark this instance as OFFLINE */ | 591 | /* mark this instance as OFFLINE */ |
586 | edac_dev->op_state = OP_OFFLINE; | 592 | edac_dev->op_state = OP_OFFLINE; |
587 | 593 | ||
588 | /* clear workq processing on this instance */ | ||
589 | edac_device_workq_teardown(edac_dev); | ||
590 | |||
591 | /* deregister from global list */ | 594 | /* deregister from global list */ |
592 | del_edac_device_from_global_list(edac_dev); | 595 | del_edac_device_from_global_list(edac_dev); |
593 | 596 | ||
594 | mutex_unlock(&device_ctls_mutex); | 597 | mutex_unlock(&device_ctls_mutex); |
595 | 598 | ||
599 | /* clear workq processing on this instance */ | ||
600 | edac_device_workq_teardown(edac_dev); | ||
601 | |||
596 | /* Tear down the sysfs entries for this instance */ | 602 | /* Tear down the sysfs entries for this instance */ |
597 | edac_device_remove_sysfs(edac_dev); | 603 | edac_device_remove_sysfs(edac_dev); |
598 | 604 | ||
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 8daf4793ac32..4a597d8c2f70 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -467,6 +467,17 @@ const char *dmi_get_system_info(int field) | |||
467 | } | 467 | } |
468 | EXPORT_SYMBOL(dmi_get_system_info); | 468 | EXPORT_SYMBOL(dmi_get_system_info); |
469 | 469 | ||
470 | /** | ||
471 | * dmi_name_in_serial - Check if string is in the DMI product serial | ||
472 | * information. | ||
473 | */ | ||
474 | int dmi_name_in_serial(const char *str) | ||
475 | { | ||
476 | int f = DMI_PRODUCT_SERIAL; | ||
477 | if (dmi_ident[f] && strstr(dmi_ident[f], str)) | ||
478 | return 1; | ||
479 | return 0; | ||
480 | } | ||
470 | 481 | ||
471 | /** | 482 | /** |
472 | * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. | 483 | * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 553dd4bc3075..afa8a12cd009 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
717 | value = dev->pci_device; | 717 | value = dev->pci_device; |
718 | break; | 718 | break; |
719 | case I915_PARAM_HAS_GEM: | 719 | case I915_PARAM_HAS_GEM: |
720 | value = 1; | 720 | value = dev_priv->has_gem; |
721 | break; | 721 | break; |
722 | default: | 722 | default: |
723 | DRM_ERROR("Unknown parameter %d\n", param->param); | 723 | DRM_ERROR("Unknown parameter %d\n", param->param); |
@@ -830,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
830 | 830 | ||
831 | dev_priv->regs = ioremap(base, size); | 831 | dev_priv->regs = ioremap(base, size); |
832 | 832 | ||
833 | #ifdef CONFIG_HIGHMEM64G | ||
834 | /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ | ||
835 | dev_priv->has_gem = 0; | ||
836 | #else | ||
837 | /* enable GEM by default */ | ||
838 | dev_priv->has_gem = 1; | ||
839 | #endif | ||
840 | |||
833 | i915_gem_load(dev); | 841 | i915_gem_load(dev); |
834 | 842 | ||
835 | /* Init HWS */ | 843 | /* Init HWS */ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index adc972cc6bfc..b3cc4731aa7c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -106,6 +106,8 @@ struct intel_opregion { | |||
106 | typedef struct drm_i915_private { | 106 | typedef struct drm_i915_private { |
107 | struct drm_device *dev; | 107 | struct drm_device *dev; |
108 | 108 | ||
109 | int has_gem; | ||
110 | |||
109 | void __iomem *regs; | 111 | void __iomem *regs; |
110 | drm_local_map_t *sarea; | 112 | drm_local_map_t *sarea; |
111 | 113 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ad672d854828..24fe8c10b4b2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2309,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
2309 | } | 2309 | } |
2310 | 2310 | ||
2311 | obj_priv = obj->driver_private; | 2311 | obj_priv = obj->driver_private; |
2312 | args->busy = obj_priv->active; | 2312 | /* Don't count being on the flushing list against the object being |
2313 | * done. Otherwise, a buffer left on the flushing list but not getting | ||
2314 | * flushed (because nobody's flushing that domain) won't ever return | ||
2315 | * unbusy and get reused by libdrm's bo cache. The other expected | ||
2316 | * consumer of this interface, OpenGL's occlusion queries, also specs | ||
2317 | * that the objects get unbusy "eventually" without any interference. | ||
2318 | */ | ||
2319 | args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; | ||
2313 | 2320 | ||
2314 | drm_gem_object_unreference(obj); | 2321 | drm_gem_object_unreference(obj); |
2315 | mutex_unlock(&dev->struct_mutex); | 2322 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 99be11418ac2..8289e16419a8 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
@@ -44,7 +44,7 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) | |||
44 | else | 44 | else |
45 | dev_priv->irq_enable_reg &= ~mask; | 45 | dev_priv->irq_enable_reg &= ~mask; |
46 | 46 | ||
47 | if (!dev->irq_enabled) | 47 | if (dev->irq_enabled) |
48 | RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); | 48 | RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); |
49 | } | 49 | } |
50 | 50 | ||
@@ -57,7 +57,7 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) | |||
57 | else | 57 | else |
58 | dev_priv->r500_disp_irq_reg &= ~mask; | 58 | dev_priv->r500_disp_irq_reg &= ~mask; |
59 | 59 | ||
60 | if (!dev->irq_enabled) | 60 | if (dev->irq_enabled) |
61 | RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); | 61 | RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); |
62 | } | 62 | } |
63 | 63 | ||
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 228f75723063..3fcf78e906db 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c | |||
@@ -365,6 +365,7 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | |||
365 | pmsg = &msgs[tptr]; | 365 | pmsg = &msgs[tptr]; |
366 | if (pmsg->flags & I2C_M_RD) | 366 | if (pmsg->flags & I2C_M_RD) |
367 | ret = wait_event_interruptible_timeout(cpm->i2c_wait, | 367 | ret = wait_event_interruptible_timeout(cpm->i2c_wait, |
368 | (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) || | ||
368 | !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY), | 369 | !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY), |
369 | 1 * HZ); | 370 | 1 * HZ); |
370 | else | 371 | else |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 1fac4e233133..b7434d24904e 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -56,6 +56,7 @@ enum s3c24xx_i2c_state { | |||
56 | struct s3c24xx_i2c { | 56 | struct s3c24xx_i2c { |
57 | spinlock_t lock; | 57 | spinlock_t lock; |
58 | wait_queue_head_t wait; | 58 | wait_queue_head_t wait; |
59 | unsigned int suspended:1; | ||
59 | 60 | ||
60 | struct i2c_msg *msg; | 61 | struct i2c_msg *msg; |
61 | unsigned int msg_num; | 62 | unsigned int msg_num; |
@@ -507,7 +508,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int | |||
507 | unsigned long timeout; | 508 | unsigned long timeout; |
508 | int ret; | 509 | int ret; |
509 | 510 | ||
510 | if (!(readl(i2c->regs + S3C2410_IICCON) & S3C2410_IICCON_IRQEN)) | 511 | if (i2c->suspended) |
511 | return -EIO; | 512 | return -EIO; |
512 | 513 | ||
513 | ret = s3c24xx_i2c_set_master(i2c); | 514 | ret = s3c24xx_i2c_set_master(i2c); |
@@ -986,17 +987,26 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) | |||
986 | } | 987 | } |
987 | 988 | ||
988 | #ifdef CONFIG_PM | 989 | #ifdef CONFIG_PM |
990 | static int s3c24xx_i2c_suspend_late(struct platform_device *dev, | ||
991 | pm_message_t msg) | ||
992 | { | ||
993 | struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); | ||
994 | i2c->suspended = 1; | ||
995 | return 0; | ||
996 | } | ||
997 | |||
989 | static int s3c24xx_i2c_resume(struct platform_device *dev) | 998 | static int s3c24xx_i2c_resume(struct platform_device *dev) |
990 | { | 999 | { |
991 | struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); | 1000 | struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); |
992 | 1001 | ||
993 | if (i2c != NULL) | 1002 | i2c->suspended = 0; |
994 | s3c24xx_i2c_init(i2c); | 1003 | s3c24xx_i2c_init(i2c); |
995 | 1004 | ||
996 | return 0; | 1005 | return 0; |
997 | } | 1006 | } |
998 | 1007 | ||
999 | #else | 1008 | #else |
1009 | #define s3c24xx_i2c_suspend_late NULL | ||
1000 | #define s3c24xx_i2c_resume NULL | 1010 | #define s3c24xx_i2c_resume NULL |
1001 | #endif | 1011 | #endif |
1002 | 1012 | ||
@@ -1005,6 +1015,7 @@ static int s3c24xx_i2c_resume(struct platform_device *dev) | |||
1005 | static struct platform_driver s3c2410_i2c_driver = { | 1015 | static struct platform_driver s3c2410_i2c_driver = { |
1006 | .probe = s3c24xx_i2c_probe, | 1016 | .probe = s3c24xx_i2c_probe, |
1007 | .remove = s3c24xx_i2c_remove, | 1017 | .remove = s3c24xx_i2c_remove, |
1018 | .suspend_late = s3c24xx_i2c_suspend_late, | ||
1008 | .resume = s3c24xx_i2c_resume, | 1019 | .resume = s3c24xx_i2c_resume, |
1009 | .driver = { | 1020 | .driver = { |
1010 | .owner = THIS_MODULE, | 1021 | .owner = THIS_MODULE, |
@@ -1015,6 +1026,7 @@ static struct platform_driver s3c2410_i2c_driver = { | |||
1015 | static struct platform_driver s3c2440_i2c_driver = { | 1026 | static struct platform_driver s3c2440_i2c_driver = { |
1016 | .probe = s3c24xx_i2c_probe, | 1027 | .probe = s3c24xx_i2c_probe, |
1017 | .remove = s3c24xx_i2c_remove, | 1028 | .remove = s3c24xx_i2c_remove, |
1029 | .suspend_late = s3c24xx_i2c_suspend_late, | ||
1018 | .resume = s3c24xx_i2c_resume, | 1030 | .resume = s3c24xx_i2c_resume, |
1019 | .driver = { | 1031 | .driver = { |
1020 | .owner = THIS_MODULE, | 1032 | .owner = THIS_MODULE, |
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 3384a717fec0..6c3d60b939bf 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -160,9 +160,39 @@ struct sh_mobile_i2c_data { | |||
160 | 160 | ||
161 | static void activate_ch(struct sh_mobile_i2c_data *pd) | 161 | static void activate_ch(struct sh_mobile_i2c_data *pd) |
162 | { | 162 | { |
163 | unsigned long i2c_clk; | ||
164 | u_int32_t num; | ||
165 | u_int32_t denom; | ||
166 | u_int32_t tmp; | ||
167 | |||
163 | /* Make sure the clock is enabled */ | 168 | /* Make sure the clock is enabled */ |
164 | clk_enable(pd->clk); | 169 | clk_enable(pd->clk); |
165 | 170 | ||
171 | /* Get clock rate after clock is enabled */ | ||
172 | i2c_clk = clk_get_rate(pd->clk); | ||
173 | |||
174 | /* Calculate the value for iccl. From the data sheet: | ||
175 | * iccl = (p clock / transfer rate) * (L / (L + H)) | ||
176 | * where L and H are the SCL low/high ratio (5/4 in this case). | ||
177 | * We also round off the result. | ||
178 | */ | ||
179 | num = i2c_clk * 5; | ||
180 | denom = NORMAL_SPEED * 9; | ||
181 | tmp = num * 10 / denom; | ||
182 | if (tmp % 10 >= 5) | ||
183 | pd->iccl = (u_int8_t)((num/denom) + 1); | ||
184 | else | ||
185 | pd->iccl = (u_int8_t)(num/denom); | ||
186 | |||
187 | /* Calculate the value for icch. From the data sheet: | ||
188 | icch = (p clock / transfer rate) * (H / (L + H)) */ | ||
189 | num = i2c_clk * 4; | ||
190 | tmp = num * 10 / denom; | ||
191 | if (tmp % 10 >= 5) | ||
192 | pd->icch = (u_int8_t)((num/denom) + 1); | ||
193 | else | ||
194 | pd->icch = (u_int8_t)(num/denom); | ||
195 | |||
166 | /* Enable channel and configure rx ack */ | 196 | /* Enable channel and configure rx ack */ |
167 | iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); | 197 | iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); |
168 | 198 | ||
@@ -459,40 +489,6 @@ static struct i2c_algorithm sh_mobile_i2c_algorithm = { | |||
459 | .master_xfer = sh_mobile_i2c_xfer, | 489 | .master_xfer = sh_mobile_i2c_xfer, |
460 | }; | 490 | }; |
461 | 491 | ||
462 | static void sh_mobile_i2c_setup_channel(struct platform_device *dev) | ||
463 | { | ||
464 | struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); | ||
465 | unsigned long peripheral_clk = clk_get_rate(pd->clk); | ||
466 | u_int32_t num; | ||
467 | u_int32_t denom; | ||
468 | u_int32_t tmp; | ||
469 | |||
470 | spin_lock_init(&pd->lock); | ||
471 | init_waitqueue_head(&pd->wait); | ||
472 | |||
473 | /* Calculate the value for iccl. From the data sheet: | ||
474 | * iccl = (p clock / transfer rate) * (L / (L + H)) | ||
475 | * where L and H are the SCL low/high ratio (5/4 in this case). | ||
476 | * We also round off the result. | ||
477 | */ | ||
478 | num = peripheral_clk * 5; | ||
479 | denom = NORMAL_SPEED * 9; | ||
480 | tmp = num * 10 / denom; | ||
481 | if (tmp % 10 >= 5) | ||
482 | pd->iccl = (u_int8_t)((num/denom) + 1); | ||
483 | else | ||
484 | pd->iccl = (u_int8_t)(num/denom); | ||
485 | |||
486 | /* Calculate the value for icch. From the data sheet: | ||
487 | icch = (p clock / transfer rate) * (H / (L + H)) */ | ||
488 | num = peripheral_clk * 4; | ||
489 | tmp = num * 10 / denom; | ||
490 | if (tmp % 10 >= 5) | ||
491 | pd->icch = (u_int8_t)((num/denom) + 1); | ||
492 | else | ||
493 | pd->icch = (u_int8_t)(num/denom); | ||
494 | } | ||
495 | |||
496 | static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook) | 492 | static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook) |
497 | { | 493 | { |
498 | struct resource *res; | 494 | struct resource *res; |
@@ -533,6 +529,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) | |||
533 | struct sh_mobile_i2c_data *pd; | 529 | struct sh_mobile_i2c_data *pd; |
534 | struct i2c_adapter *adap; | 530 | struct i2c_adapter *adap; |
535 | struct resource *res; | 531 | struct resource *res; |
532 | char clk_name[8]; | ||
536 | int size; | 533 | int size; |
537 | int ret; | 534 | int ret; |
538 | 535 | ||
@@ -542,9 +539,10 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) | |||
542 | return -ENOMEM; | 539 | return -ENOMEM; |
543 | } | 540 | } |
544 | 541 | ||
545 | pd->clk = clk_get(&dev->dev, "peripheral_clk"); | 542 | snprintf(clk_name, sizeof(clk_name), "i2c%d", dev->id); |
543 | pd->clk = clk_get(&dev->dev, clk_name); | ||
546 | if (IS_ERR(pd->clk)) { | 544 | if (IS_ERR(pd->clk)) { |
547 | dev_err(&dev->dev, "cannot get peripheral clock\n"); | 545 | dev_err(&dev->dev, "cannot get clock \"%s\"\n", clk_name); |
548 | ret = PTR_ERR(pd->clk); | 546 | ret = PTR_ERR(pd->clk); |
549 | goto err; | 547 | goto err; |
550 | } | 548 | } |
@@ -586,7 +584,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) | |||
586 | 584 | ||
587 | strlcpy(adap->name, dev->name, sizeof(adap->name)); | 585 | strlcpy(adap->name, dev->name, sizeof(adap->name)); |
588 | 586 | ||
589 | sh_mobile_i2c_setup_channel(dev); | 587 | spin_lock_init(&pd->lock); |
588 | init_waitqueue_head(&pd->wait); | ||
590 | 589 | ||
591 | ret = i2c_add_numbered_adapter(adap); | 590 | ret = i2c_add_numbered_adapter(adap); |
592 | if (ret < 0) { | 591 | if (ret < 0) { |
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c index 53f079cc00af..d8ede85fe17f 100644 --- a/drivers/ide/cs5530.c +++ b/drivers/ide/cs5530.c | |||
@@ -81,11 +81,12 @@ static u8 cs5530_udma_filter(ide_drive_t *drive) | |||
81 | { | 81 | { |
82 | ide_hwif_t *hwif = drive->hwif; | 82 | ide_hwif_t *hwif = drive->hwif; |
83 | ide_drive_t *mate = ide_get_pair_dev(drive); | 83 | ide_drive_t *mate = ide_get_pair_dev(drive); |
84 | u16 *mateid = mate->id; | 84 | u16 *mateid; |
85 | u8 mask = hwif->ultra_mask; | 85 | u8 mask = hwif->ultra_mask; |
86 | 86 | ||
87 | if (mate == NULL) | 87 | if (mate == NULL) |
88 | goto out; | 88 | goto out; |
89 | mateid = mate->id; | ||
89 | 90 | ||
90 | if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) { | 91 | if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) { |
91 | if ((mateid[ATA_ID_FIELD_VALID] & 4) && | 92 | if ((mateid[ATA_ID_FIELD_VALID] & 4) && |
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c index f1a8758e3a99..ec7f766ef5e4 100644 --- a/drivers/ide/sc1200.c +++ b/drivers/ide/sc1200.c | |||
@@ -104,11 +104,12 @@ static u8 sc1200_udma_filter(ide_drive_t *drive) | |||
104 | { | 104 | { |
105 | ide_hwif_t *hwif = drive->hwif; | 105 | ide_hwif_t *hwif = drive->hwif; |
106 | ide_drive_t *mate = ide_get_pair_dev(drive); | 106 | ide_drive_t *mate = ide_get_pair_dev(drive); |
107 | u16 *mateid = mate->id; | 107 | u16 *mateid; |
108 | u8 mask = hwif->ultra_mask; | 108 | u8 mask = hwif->ultra_mask; |
109 | 109 | ||
110 | if (mate == NULL) | 110 | if (mate == NULL) |
111 | goto out; | 111 | goto out; |
112 | mateid = mate->id; | ||
112 | 113 | ||
113 | if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) { | 114 | if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) { |
114 | if ((mateid[ATA_ID_FIELD_VALID] & 4) && | 115 | if ((mateid[ATA_ID_FIELD_VALID] & 4) && |
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index d333ae22459c..79ef5fd928ae 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c | |||
@@ -115,8 +115,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length, | |||
115 | return error; | 115 | return error; |
116 | } | 116 | } |
117 | 117 | ||
118 | #define OUI_FREECOM_TECHNOLOGIES_GMBH 0x0001db | ||
119 | |||
118 | static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci) | 120 | static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci) |
119 | { | 121 | { |
122 | /* Freecom FireWire Hard Drive firmware bug */ | ||
123 | if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH) | ||
124 | return 0; | ||
125 | |||
120 | return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3; | 126 | return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3; |
121 | } | 127 | } |
122 | 128 | ||
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 09a2bec7fd32..d98b05b28262 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include <net/neighbour.h> | 41 | #include <net/neighbour.h> |
42 | #include <net/route.h> | 42 | #include <net/route.h> |
43 | #include <net/netevent.h> | 43 | #include <net/netevent.h> |
44 | #include <net/addrconf.h> | ||
45 | #include <net/ip6_route.h> | ||
44 | #include <rdma/ib_addr.h> | 46 | #include <rdma/ib_addr.h> |
45 | 47 | ||
46 | MODULE_AUTHOR("Sean Hefty"); | 48 | MODULE_AUTHOR("Sean Hefty"); |
@@ -49,8 +51,8 @@ MODULE_LICENSE("Dual BSD/GPL"); | |||
49 | 51 | ||
50 | struct addr_req { | 52 | struct addr_req { |
51 | struct list_head list; | 53 | struct list_head list; |
52 | struct sockaddr src_addr; | 54 | struct sockaddr_storage src_addr; |
53 | struct sockaddr dst_addr; | 55 | struct sockaddr_storage dst_addr; |
54 | struct rdma_dev_addr *addr; | 56 | struct rdma_dev_addr *addr; |
55 | struct rdma_addr_client *client; | 57 | struct rdma_addr_client *client; |
56 | void *context; | 58 | void *context; |
@@ -113,15 +115,32 @@ EXPORT_SYMBOL(rdma_copy_addr); | |||
113 | int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) | 115 | int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) |
114 | { | 116 | { |
115 | struct net_device *dev; | 117 | struct net_device *dev; |
116 | __be32 ip = ((struct sockaddr_in *) addr)->sin_addr.s_addr; | 118 | int ret = -EADDRNOTAVAIL; |
117 | int ret; | ||
118 | 119 | ||
119 | dev = ip_dev_find(&init_net, ip); | 120 | switch (addr->sa_family) { |
120 | if (!dev) | 121 | case AF_INET: |
121 | return -EADDRNOTAVAIL; | 122 | dev = ip_dev_find(&init_net, |
123 | ((struct sockaddr_in *) addr)->sin_addr.s_addr); | ||
124 | |||
125 | if (!dev) | ||
126 | return ret; | ||
122 | 127 | ||
123 | ret = rdma_copy_addr(dev_addr, dev, NULL); | 128 | ret = rdma_copy_addr(dev_addr, dev, NULL); |
124 | dev_put(dev); | 129 | dev_put(dev); |
130 | break; | ||
131 | case AF_INET6: | ||
132 | for_each_netdev(&init_net, dev) { | ||
133 | if (ipv6_chk_addr(&init_net, | ||
134 | &((struct sockaddr_in6 *) addr)->sin6_addr, | ||
135 | dev, 1)) { | ||
136 | ret = rdma_copy_addr(dev_addr, dev, NULL); | ||
137 | break; | ||
138 | } | ||
139 | } | ||
140 | break; | ||
141 | default: | ||
142 | break; | ||
143 | } | ||
125 | return ret; | 144 | return ret; |
126 | } | 145 | } |
127 | EXPORT_SYMBOL(rdma_translate_ip); | 146 | EXPORT_SYMBOL(rdma_translate_ip); |
@@ -156,22 +175,37 @@ static void queue_req(struct addr_req *req) | |||
156 | mutex_unlock(&lock); | 175 | mutex_unlock(&lock); |
157 | } | 176 | } |
158 | 177 | ||
159 | static void addr_send_arp(struct sockaddr_in *dst_in) | 178 | static void addr_send_arp(struct sockaddr *dst_in) |
160 | { | 179 | { |
161 | struct rtable *rt; | 180 | struct rtable *rt; |
162 | struct flowi fl; | 181 | struct flowi fl; |
163 | __be32 dst_ip = dst_in->sin_addr.s_addr; | 182 | struct dst_entry *dst; |
164 | 183 | ||
165 | memset(&fl, 0, sizeof fl); | 184 | memset(&fl, 0, sizeof fl); |
166 | fl.nl_u.ip4_u.daddr = dst_ip; | 185 | if (dst_in->sa_family == AF_INET) { |
167 | if (ip_route_output_key(&init_net, &rt, &fl)) | 186 | fl.nl_u.ip4_u.daddr = |
168 | return; | 187 | ((struct sockaddr_in *) dst_in)->sin_addr.s_addr; |
169 | 188 | ||
170 | neigh_event_send(rt->u.dst.neighbour, NULL); | 189 | if (ip_route_output_key(&init_net, &rt, &fl)) |
171 | ip_rt_put(rt); | 190 | return; |
191 | |||
192 | neigh_event_send(rt->u.dst.neighbour, NULL); | ||
193 | ip_rt_put(rt); | ||
194 | |||
195 | } else { | ||
196 | fl.nl_u.ip6_u.daddr = | ||
197 | ((struct sockaddr_in6 *) dst_in)->sin6_addr; | ||
198 | |||
199 | dst = ip6_route_output(&init_net, NULL, &fl); | ||
200 | if (!dst) | ||
201 | return; | ||
202 | |||
203 | neigh_event_send(dst->neighbour, NULL); | ||
204 | dst_release(dst); | ||
205 | } | ||
172 | } | 206 | } |
173 | 207 | ||
174 | static int addr_resolve_remote(struct sockaddr_in *src_in, | 208 | static int addr4_resolve_remote(struct sockaddr_in *src_in, |
175 | struct sockaddr_in *dst_in, | 209 | struct sockaddr_in *dst_in, |
176 | struct rdma_dev_addr *addr) | 210 | struct rdma_dev_addr *addr) |
177 | { | 211 | { |
@@ -220,10 +254,51 @@ out: | |||
220 | return ret; | 254 | return ret; |
221 | } | 255 | } |
222 | 256 | ||
257 | static int addr6_resolve_remote(struct sockaddr_in6 *src_in, | ||
258 | struct sockaddr_in6 *dst_in, | ||
259 | struct rdma_dev_addr *addr) | ||
260 | { | ||
261 | struct flowi fl; | ||
262 | struct neighbour *neigh; | ||
263 | struct dst_entry *dst; | ||
264 | int ret = -ENODATA; | ||
265 | |||
266 | memset(&fl, 0, sizeof fl); | ||
267 | fl.nl_u.ip6_u.daddr = dst_in->sin6_addr; | ||
268 | fl.nl_u.ip6_u.saddr = src_in->sin6_addr; | ||
269 | |||
270 | dst = ip6_route_output(&init_net, NULL, &fl); | ||
271 | if (!dst) | ||
272 | return ret; | ||
273 | |||
274 | if (dst->dev->flags & IFF_NOARP) { | ||
275 | ret = rdma_copy_addr(addr, dst->dev, NULL); | ||
276 | } else { | ||
277 | neigh = dst->neighbour; | ||
278 | if (neigh && (neigh->nud_state & NUD_VALID)) | ||
279 | ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); | ||
280 | } | ||
281 | |||
282 | dst_release(dst); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | static int addr_resolve_remote(struct sockaddr *src_in, | ||
287 | struct sockaddr *dst_in, | ||
288 | struct rdma_dev_addr *addr) | ||
289 | { | ||
290 | if (src_in->sa_family == AF_INET) { | ||
291 | return addr4_resolve_remote((struct sockaddr_in *) src_in, | ||
292 | (struct sockaddr_in *) dst_in, addr); | ||
293 | } else | ||
294 | return addr6_resolve_remote((struct sockaddr_in6 *) src_in, | ||
295 | (struct sockaddr_in6 *) dst_in, addr); | ||
296 | } | ||
297 | |||
223 | static void process_req(struct work_struct *work) | 298 | static void process_req(struct work_struct *work) |
224 | { | 299 | { |
225 | struct addr_req *req, *temp_req; | 300 | struct addr_req *req, *temp_req; |
226 | struct sockaddr_in *src_in, *dst_in; | 301 | struct sockaddr *src_in, *dst_in; |
227 | struct list_head done_list; | 302 | struct list_head done_list; |
228 | 303 | ||
229 | INIT_LIST_HEAD(&done_list); | 304 | INIT_LIST_HEAD(&done_list); |
@@ -231,8 +306,8 @@ static void process_req(struct work_struct *work) | |||
231 | mutex_lock(&lock); | 306 | mutex_lock(&lock); |
232 | list_for_each_entry_safe(req, temp_req, &req_list, list) { | 307 | list_for_each_entry_safe(req, temp_req, &req_list, list) { |
233 | if (req->status == -ENODATA) { | 308 | if (req->status == -ENODATA) { |
234 | src_in = (struct sockaddr_in *) &req->src_addr; | 309 | src_in = (struct sockaddr *) &req->src_addr; |
235 | dst_in = (struct sockaddr_in *) &req->dst_addr; | 310 | dst_in = (struct sockaddr *) &req->dst_addr; |
236 | req->status = addr_resolve_remote(src_in, dst_in, | 311 | req->status = addr_resolve_remote(src_in, dst_in, |
237 | req->addr); | 312 | req->addr); |
238 | if (req->status && time_after_eq(jiffies, req->timeout)) | 313 | if (req->status && time_after_eq(jiffies, req->timeout)) |
@@ -251,41 +326,72 @@ static void process_req(struct work_struct *work) | |||
251 | 326 | ||
252 | list_for_each_entry_safe(req, temp_req, &done_list, list) { | 327 | list_for_each_entry_safe(req, temp_req, &done_list, list) { |
253 | list_del(&req->list); | 328 | list_del(&req->list); |
254 | req->callback(req->status, &req->src_addr, req->addr, | 329 | req->callback(req->status, (struct sockaddr *) &req->src_addr, |
255 | req->context); | 330 | req->addr, req->context); |
256 | put_client(req->client); | 331 | put_client(req->client); |
257 | kfree(req); | 332 | kfree(req); |
258 | } | 333 | } |
259 | } | 334 | } |
260 | 335 | ||
261 | static int addr_resolve_local(struct sockaddr_in *src_in, | 336 | static int addr_resolve_local(struct sockaddr *src_in, |
262 | struct sockaddr_in *dst_in, | 337 | struct sockaddr *dst_in, |
263 | struct rdma_dev_addr *addr) | 338 | struct rdma_dev_addr *addr) |
264 | { | 339 | { |
265 | struct net_device *dev; | 340 | struct net_device *dev; |
266 | __be32 src_ip = src_in->sin_addr.s_addr; | ||
267 | __be32 dst_ip = dst_in->sin_addr.s_addr; | ||
268 | int ret; | 341 | int ret; |
269 | 342 | ||
270 | dev = ip_dev_find(&init_net, dst_ip); | 343 | if (dst_in->sa_family == AF_INET) { |
271 | if (!dev) | 344 | __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr; |
272 | return -EADDRNOTAVAIL; | 345 | __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr; |
273 | 346 | ||
274 | if (ipv4_is_zeronet(src_ip)) { | 347 | dev = ip_dev_find(&init_net, dst_ip); |
275 | src_in->sin_family = dst_in->sin_family; | 348 | if (!dev) |
276 | src_in->sin_addr.s_addr = dst_ip; | 349 | return -EADDRNOTAVAIL; |
277 | ret = rdma_copy_addr(addr, dev, dev->dev_addr); | 350 | |
278 | } else if (ipv4_is_loopback(src_ip)) { | 351 | if (ipv4_is_zeronet(src_ip)) { |
279 | ret = rdma_translate_ip((struct sockaddr *)dst_in, addr); | 352 | src_in->sa_family = dst_in->sa_family; |
280 | if (!ret) | 353 | ((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip; |
281 | memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); | 354 | ret = rdma_copy_addr(addr, dev, dev->dev_addr); |
355 | } else if (ipv4_is_loopback(src_ip)) { | ||
356 | ret = rdma_translate_ip(dst_in, addr); | ||
357 | if (!ret) | ||
358 | memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); | ||
359 | } else { | ||
360 | ret = rdma_translate_ip(src_in, addr); | ||
361 | if (!ret) | ||
362 | memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); | ||
363 | } | ||
364 | dev_put(dev); | ||
282 | } else { | 365 | } else { |
283 | ret = rdma_translate_ip((struct sockaddr *)src_in, addr); | 366 | struct in6_addr *a; |
284 | if (!ret) | 367 | |
285 | memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); | 368 | for_each_netdev(&init_net, dev) |
369 | if (ipv6_chk_addr(&init_net, | ||
370 | &((struct sockaddr_in6 *) addr)->sin6_addr, | ||
371 | dev, 1)) | ||
372 | break; | ||
373 | |||
374 | if (!dev) | ||
375 | return -EADDRNOTAVAIL; | ||
376 | |||
377 | a = &((struct sockaddr_in6 *) src_in)->sin6_addr; | ||
378 | |||
379 | if (ipv6_addr_any(a)) { | ||
380 | src_in->sa_family = dst_in->sa_family; | ||
381 | ((struct sockaddr_in6 *) src_in)->sin6_addr = | ||
382 | ((struct sockaddr_in6 *) dst_in)->sin6_addr; | ||
383 | ret = rdma_copy_addr(addr, dev, dev->dev_addr); | ||
384 | } else if (ipv6_addr_loopback(a)) { | ||
385 | ret = rdma_translate_ip(dst_in, addr); | ||
386 | if (!ret) | ||
387 | memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); | ||
388 | } else { | ||
389 | ret = rdma_translate_ip(src_in, addr); | ||
390 | if (!ret) | ||
391 | memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); | ||
392 | } | ||
286 | } | 393 | } |
287 | 394 | ||
288 | dev_put(dev); | ||
289 | return ret; | 395 | return ret; |
290 | } | 396 | } |
291 | 397 | ||
@@ -296,7 +402,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, | |||
296 | struct rdma_dev_addr *addr, void *context), | 402 | struct rdma_dev_addr *addr, void *context), |
297 | void *context) | 403 | void *context) |
298 | { | 404 | { |
299 | struct sockaddr_in *src_in, *dst_in; | 405 | struct sockaddr *src_in, *dst_in; |
300 | struct addr_req *req; | 406 | struct addr_req *req; |
301 | int ret = 0; | 407 | int ret = 0; |
302 | 408 | ||
@@ -313,8 +419,8 @@ int rdma_resolve_ip(struct rdma_addr_client *client, | |||
313 | req->client = client; | 419 | req->client = client; |
314 | atomic_inc(&client->refcount); | 420 | atomic_inc(&client->refcount); |
315 | 421 | ||
316 | src_in = (struct sockaddr_in *) &req->src_addr; | 422 | src_in = (struct sockaddr *) &req->src_addr; |
317 | dst_in = (struct sockaddr_in *) &req->dst_addr; | 423 | dst_in = (struct sockaddr *) &req->dst_addr; |
318 | 424 | ||
319 | req->status = addr_resolve_local(src_in, dst_in, addr); | 425 | req->status = addr_resolve_local(src_in, dst_in, addr); |
320 | if (req->status == -EADDRNOTAVAIL) | 426 | if (req->status == -EADDRNOTAVAIL) |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d951896ff7fc..2a2e50871b40 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/inetdevice.h> | 42 | #include <linux/inetdevice.h> |
43 | 43 | ||
44 | #include <net/tcp.h> | 44 | #include <net/tcp.h> |
45 | #include <net/ipv6.h> | ||
45 | 46 | ||
46 | #include <rdma/rdma_cm.h> | 47 | #include <rdma/rdma_cm.h> |
47 | #include <rdma/rdma_cm_ib.h> | 48 | #include <rdma/rdma_cm_ib.h> |
@@ -636,7 +637,12 @@ static inline int cma_zero_addr(struct sockaddr *addr) | |||
636 | 637 | ||
637 | static inline int cma_loopback_addr(struct sockaddr *addr) | 638 | static inline int cma_loopback_addr(struct sockaddr *addr) |
638 | { | 639 | { |
639 | return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); | 640 | if (addr->sa_family == AF_INET) |
641 | return ipv4_is_loopback( | ||
642 | ((struct sockaddr_in *) addr)->sin_addr.s_addr); | ||
643 | else | ||
644 | return ipv6_addr_loopback( | ||
645 | &((struct sockaddr_in6 *) addr)->sin6_addr); | ||
640 | } | 646 | } |
641 | 647 | ||
642 | static inline int cma_any_addr(struct sockaddr *addr) | 648 | static inline int cma_any_addr(struct sockaddr *addr) |
@@ -1467,10 +1473,10 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) | |||
1467 | 1473 | ||
1468 | static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af) | 1474 | static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af) |
1469 | { | 1475 | { |
1470 | struct sockaddr_in addr_in; | 1476 | struct sockaddr_storage addr_in; |
1471 | 1477 | ||
1472 | memset(&addr_in, 0, sizeof addr_in); | 1478 | memset(&addr_in, 0, sizeof addr_in); |
1473 | addr_in.sin_family = af; | 1479 | addr_in.ss_family = af; |
1474 | return rdma_bind_addr(id, (struct sockaddr *) &addr_in); | 1480 | return rdma_bind_addr(id, (struct sockaddr *) &addr_in); |
1475 | } | 1481 | } |
1476 | 1482 | ||
@@ -2073,7 +2079,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
2073 | struct rdma_id_private *id_priv; | 2079 | struct rdma_id_private *id_priv; |
2074 | int ret; | 2080 | int ret; |
2075 | 2081 | ||
2076 | if (addr->sa_family != AF_INET) | 2082 | if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) |
2077 | return -EAFNOSUPPORT; | 2083 | return -EAFNOSUPPORT; |
2078 | 2084 | ||
2079 | id_priv = container_of(id, struct rdma_id_private, id); | 2085 | id_priv = container_of(id, struct rdma_id_private, id); |
@@ -2113,31 +2119,59 @@ EXPORT_SYMBOL(rdma_bind_addr); | |||
2113 | static int cma_format_hdr(void *hdr, enum rdma_port_space ps, | 2119 | static int cma_format_hdr(void *hdr, enum rdma_port_space ps, |
2114 | struct rdma_route *route) | 2120 | struct rdma_route *route) |
2115 | { | 2121 | { |
2116 | struct sockaddr_in *src4, *dst4; | ||
2117 | struct cma_hdr *cma_hdr; | 2122 | struct cma_hdr *cma_hdr; |
2118 | struct sdp_hh *sdp_hdr; | 2123 | struct sdp_hh *sdp_hdr; |
2119 | 2124 | ||
2120 | src4 = (struct sockaddr_in *) &route->addr.src_addr; | 2125 | if (route->addr.src_addr.ss_family == AF_INET) { |
2121 | dst4 = (struct sockaddr_in *) &route->addr.dst_addr; | 2126 | struct sockaddr_in *src4, *dst4; |
2122 | 2127 | ||
2123 | switch (ps) { | 2128 | src4 = (struct sockaddr_in *) &route->addr.src_addr; |
2124 | case RDMA_PS_SDP: | 2129 | dst4 = (struct sockaddr_in *) &route->addr.dst_addr; |
2125 | sdp_hdr = hdr; | 2130 | |
2126 | if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) | 2131 | switch (ps) { |
2127 | return -EINVAL; | 2132 | case RDMA_PS_SDP: |
2128 | sdp_set_ip_ver(sdp_hdr, 4); | 2133 | sdp_hdr = hdr; |
2129 | sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; | 2134 | if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) |
2130 | sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; | 2135 | return -EINVAL; |
2131 | sdp_hdr->port = src4->sin_port; | 2136 | sdp_set_ip_ver(sdp_hdr, 4); |
2132 | break; | 2137 | sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; |
2133 | default: | 2138 | sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; |
2134 | cma_hdr = hdr; | 2139 | sdp_hdr->port = src4->sin_port; |
2135 | cma_hdr->cma_version = CMA_VERSION; | 2140 | break; |
2136 | cma_set_ip_ver(cma_hdr, 4); | 2141 | default: |
2137 | cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; | 2142 | cma_hdr = hdr; |
2138 | cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; | 2143 | cma_hdr->cma_version = CMA_VERSION; |
2139 | cma_hdr->port = src4->sin_port; | 2144 | cma_set_ip_ver(cma_hdr, 4); |
2140 | break; | 2145 | cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; |
2146 | cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; | ||
2147 | cma_hdr->port = src4->sin_port; | ||
2148 | break; | ||
2149 | } | ||
2150 | } else { | ||
2151 | struct sockaddr_in6 *src6, *dst6; | ||
2152 | |||
2153 | src6 = (struct sockaddr_in6 *) &route->addr.src_addr; | ||
2154 | dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr; | ||
2155 | |||
2156 | switch (ps) { | ||
2157 | case RDMA_PS_SDP: | ||
2158 | sdp_hdr = hdr; | ||
2159 | if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) | ||
2160 | return -EINVAL; | ||
2161 | sdp_set_ip_ver(sdp_hdr, 6); | ||
2162 | sdp_hdr->src_addr.ip6 = src6->sin6_addr; | ||
2163 | sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; | ||
2164 | sdp_hdr->port = src6->sin6_port; | ||
2165 | break; | ||
2166 | default: | ||
2167 | cma_hdr = hdr; | ||
2168 | cma_hdr->cma_version = CMA_VERSION; | ||
2169 | cma_set_ip_ver(cma_hdr, 6); | ||
2170 | cma_hdr->src_addr.ip6 = src6->sin6_addr; | ||
2171 | cma_hdr->dst_addr.ip6 = dst6->sin6_addr; | ||
2172 | cma_hdr->port = src6->sin6_port; | ||
2173 | break; | ||
2174 | } | ||
2141 | } | 2175 | } |
2142 | return 0; | 2176 | return 0; |
2143 | } | 2177 | } |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 7fc35cf0cddf..c825142a2fb7 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -175,6 +175,13 @@ struct ehca_queue_map { | |||
175 | unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */ | 175 | unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */ |
176 | }; | 176 | }; |
177 | 177 | ||
178 | /* function to calculate the next index for the qmap */ | ||
179 | static inline unsigned int next_index(unsigned int cur_index, unsigned int limit) | ||
180 | { | ||
181 | unsigned int temp = cur_index + 1; | ||
182 | return (temp == limit) ? 0 : temp; | ||
183 | } | ||
184 | |||
178 | struct ehca_qp { | 185 | struct ehca_qp { |
179 | union { | 186 | union { |
180 | struct ib_qp ib_qp; | 187 | struct ib_qp ib_qp; |
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index 49660dfa1867..523e733c630e 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c | |||
@@ -113,7 +113,7 @@ int ehca_create_eq(struct ehca_shca *shca, | |||
113 | if (h_ret != H_SUCCESS || vpage) | 113 | if (h_ret != H_SUCCESS || vpage) |
114 | goto create_eq_exit2; | 114 | goto create_eq_exit2; |
115 | } else { | 115 | } else { |
116 | if (h_ret != H_PAGE_REGISTERED || !vpage) | 116 | if (h_ret != H_PAGE_REGISTERED) |
117 | goto create_eq_exit2; | 117 | goto create_eq_exit2; |
118 | } | 118 | } |
119 | } | 119 | } |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index bec7e0249358..3b77b674cbf6 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -717,6 +717,7 @@ static int __devinit ehca_probe(struct of_device *dev, | |||
717 | const u64 *handle; | 717 | const u64 *handle; |
718 | struct ib_pd *ibpd; | 718 | struct ib_pd *ibpd; |
719 | int ret, i, eq_size; | 719 | int ret, i, eq_size; |
720 | unsigned long flags; | ||
720 | 721 | ||
721 | handle = of_get_property(dev->node, "ibm,hca-handle", NULL); | 722 | handle = of_get_property(dev->node, "ibm,hca-handle", NULL); |
722 | if (!handle) { | 723 | if (!handle) { |
@@ -830,9 +831,9 @@ static int __devinit ehca_probe(struct of_device *dev, | |||
830 | ehca_err(&shca->ib_device, | 831 | ehca_err(&shca->ib_device, |
831 | "Cannot create device attributes ret=%d", ret); | 832 | "Cannot create device attributes ret=%d", ret); |
832 | 833 | ||
833 | spin_lock(&shca_list_lock); | 834 | spin_lock_irqsave(&shca_list_lock, flags); |
834 | list_add(&shca->shca_list, &shca_list); | 835 | list_add(&shca->shca_list, &shca_list); |
835 | spin_unlock(&shca_list_lock); | 836 | spin_unlock_irqrestore(&shca_list_lock, flags); |
836 | 837 | ||
837 | return 0; | 838 | return 0; |
838 | 839 | ||
@@ -878,6 +879,7 @@ probe1: | |||
878 | static int __devexit ehca_remove(struct of_device *dev) | 879 | static int __devexit ehca_remove(struct of_device *dev) |
879 | { | 880 | { |
880 | struct ehca_shca *shca = dev->dev.driver_data; | 881 | struct ehca_shca *shca = dev->dev.driver_data; |
882 | unsigned long flags; | ||
881 | int ret; | 883 | int ret; |
882 | 884 | ||
883 | sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp); | 885 | sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp); |
@@ -915,9 +917,9 @@ static int __devexit ehca_remove(struct of_device *dev) | |||
915 | 917 | ||
916 | ib_dealloc_device(&shca->ib_device); | 918 | ib_dealloc_device(&shca->ib_device); |
917 | 919 | ||
918 | spin_lock(&shca_list_lock); | 920 | spin_lock_irqsave(&shca_list_lock, flags); |
919 | list_del(&shca->shca_list); | 921 | list_del(&shca->shca_list); |
920 | spin_unlock(&shca_list_lock); | 922 | spin_unlock_irqrestore(&shca_list_lock, flags); |
921 | 923 | ||
922 | return ret; | 924 | return ret; |
923 | } | 925 | } |
@@ -975,6 +977,7 @@ static int ehca_mem_notifier(struct notifier_block *nb, | |||
975 | unsigned long action, void *data) | 977 | unsigned long action, void *data) |
976 | { | 978 | { |
977 | static unsigned long ehca_dmem_warn_time; | 979 | static unsigned long ehca_dmem_warn_time; |
980 | unsigned long flags; | ||
978 | 981 | ||
979 | switch (action) { | 982 | switch (action) { |
980 | case MEM_CANCEL_OFFLINE: | 983 | case MEM_CANCEL_OFFLINE: |
@@ -985,12 +988,12 @@ static int ehca_mem_notifier(struct notifier_block *nb, | |||
985 | case MEM_GOING_ONLINE: | 988 | case MEM_GOING_ONLINE: |
986 | case MEM_GOING_OFFLINE: | 989 | case MEM_GOING_OFFLINE: |
987 | /* only ok if no hca is attached to the lpar */ | 990 | /* only ok if no hca is attached to the lpar */ |
988 | spin_lock(&shca_list_lock); | 991 | spin_lock_irqsave(&shca_list_lock, flags); |
989 | if (list_empty(&shca_list)) { | 992 | if (list_empty(&shca_list)) { |
990 | spin_unlock(&shca_list_lock); | 993 | spin_unlock_irqrestore(&shca_list_lock, flags); |
991 | return NOTIFY_OK; | 994 | return NOTIFY_OK; |
992 | } else { | 995 | } else { |
993 | spin_unlock(&shca_list_lock); | 996 | spin_unlock_irqrestore(&shca_list_lock, flags); |
994 | if (printk_timed_ratelimit(&ehca_dmem_warn_time, | 997 | if (printk_timed_ratelimit(&ehca_dmem_warn_time, |
995 | 30 * 1000)) | 998 | 30 * 1000)) |
996 | ehca_gen_err("DMEM operations are not allowed" | 999 | ehca_gen_err("DMEM operations are not allowed" |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index cadbf0cdd910..f161cf173dbe 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -1138,14 +1138,14 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue, | |||
1138 | return -EFAULT; | 1138 | return -EFAULT; |
1139 | } | 1139 | } |
1140 | 1140 | ||
1141 | tail_idx = (qmap->tail + 1) % qmap->entries; | 1141 | tail_idx = next_index(qmap->tail, qmap->entries); |
1142 | wqe_idx = q_ofs / ipz_queue->qe_size; | 1142 | wqe_idx = q_ofs / ipz_queue->qe_size; |
1143 | 1143 | ||
1144 | /* check all processed wqes, whether a cqe is requested or not */ | 1144 | /* check all processed wqes, whether a cqe is requested or not */ |
1145 | while (tail_idx != wqe_idx) { | 1145 | while (tail_idx != wqe_idx) { |
1146 | if (qmap->map[tail_idx].cqe_req) | 1146 | if (qmap->map[tail_idx].cqe_req) |
1147 | qmap->left_to_poll++; | 1147 | qmap->left_to_poll++; |
1148 | tail_idx = (tail_idx + 1) % qmap->entries; | 1148 | tail_idx = next_index(tail_idx, qmap->entries); |
1149 | } | 1149 | } |
1150 | /* save index in queue, where we have to start flushing */ | 1150 | /* save index in queue, where we have to start flushing */ |
1151 | qmap->next_wqe_idx = wqe_idx; | 1151 | qmap->next_wqe_idx = wqe_idx; |
@@ -1195,14 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca) | |||
1195 | } else { | 1195 | } else { |
1196 | spin_lock_irqsave(&my_qp->send_cq->spinlock, flags); | 1196 | spin_lock_irqsave(&my_qp->send_cq->spinlock, flags); |
1197 | my_qp->sq_map.left_to_poll = 0; | 1197 | my_qp->sq_map.left_to_poll = 0; |
1198 | my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) % | 1198 | my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail, |
1199 | my_qp->sq_map.entries; | 1199 | my_qp->sq_map.entries); |
1200 | spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags); | 1200 | spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags); |
1201 | 1201 | ||
1202 | spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags); | 1202 | spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags); |
1203 | my_qp->rq_map.left_to_poll = 0; | 1203 | my_qp->rq_map.left_to_poll = 0; |
1204 | my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) % | 1204 | my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail, |
1205 | my_qp->rq_map.entries; | 1205 | my_qp->rq_map.entries); |
1206 | spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags); | 1206 | spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags); |
1207 | } | 1207 | } |
1208 | 1208 | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 00a648f4316c..c7112686782f 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -726,13 +726,13 @@ repoll: | |||
726 | * set left_to_poll to 0 because in error state, we will not | 726 | * set left_to_poll to 0 because in error state, we will not |
727 | * get any additional CQEs | 727 | * get any additional CQEs |
728 | */ | 728 | */ |
729 | my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) % | 729 | my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail, |
730 | my_qp->sq_map.entries; | 730 | my_qp->sq_map.entries); |
731 | my_qp->sq_map.left_to_poll = 0; | 731 | my_qp->sq_map.left_to_poll = 0; |
732 | ehca_add_to_err_list(my_qp, 1); | 732 | ehca_add_to_err_list(my_qp, 1); |
733 | 733 | ||
734 | my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) % | 734 | my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail, |
735 | my_qp->rq_map.entries; | 735 | my_qp->rq_map.entries); |
736 | my_qp->rq_map.left_to_poll = 0; | 736 | my_qp->rq_map.left_to_poll = 0; |
737 | if (HAS_RQ(my_qp)) | 737 | if (HAS_RQ(my_qp)) |
738 | ehca_add_to_err_list(my_qp, 0); | 738 | ehca_add_to_err_list(my_qp, 0); |
@@ -860,9 +860,8 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq, | |||
860 | 860 | ||
861 | /* mark as reported and advance next_wqe pointer */ | 861 | /* mark as reported and advance next_wqe pointer */ |
862 | qmap_entry->reported = 1; | 862 | qmap_entry->reported = 1; |
863 | qmap->next_wqe_idx++; | 863 | qmap->next_wqe_idx = next_index(qmap->next_wqe_idx, |
864 | if (qmap->next_wqe_idx == qmap->entries) | 864 | qmap->entries); |
865 | qmap->next_wqe_idx = 0; | ||
866 | qmap_entry = &qmap->map[qmap->next_wqe_idx]; | 865 | qmap_entry = &qmap->map[qmap->next_wqe_idx]; |
867 | 866 | ||
868 | wc++; nr++; | 867 | wc++; nr++; |
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index ad0aab60b051..69c0ce321b4e 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -661,6 +661,8 @@ bail: | |||
661 | static void __devexit cleanup_device(struct ipath_devdata *dd) | 661 | static void __devexit cleanup_device(struct ipath_devdata *dd) |
662 | { | 662 | { |
663 | int port; | 663 | int port; |
664 | struct ipath_portdata **tmp; | ||
665 | unsigned long flags; | ||
664 | 666 | ||
665 | if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { | 667 | if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { |
666 | /* can't do anything more with chip; needs re-init */ | 668 | /* can't do anything more with chip; needs re-init */ |
@@ -742,20 +744,21 @@ static void __devexit cleanup_device(struct ipath_devdata *dd) | |||
742 | 744 | ||
743 | /* | 745 | /* |
744 | * free any resources still in use (usually just kernel ports) | 746 | * free any resources still in use (usually just kernel ports) |
745 | * at unload; we do for portcnt, not cfgports, because cfgports | 747 | * at unload; we do for portcnt, because that's what we allocate. |
746 | * could have changed while we were loaded. | 748 | * We acquire lock to be really paranoid that ipath_pd isn't being |
749 | * accessed from some interrupt-related code (that should not happen, | ||
750 | * but best to be sure). | ||
747 | */ | 751 | */ |
752 | spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); | ||
753 | tmp = dd->ipath_pd; | ||
754 | dd->ipath_pd = NULL; | ||
755 | spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); | ||
748 | for (port = 0; port < dd->ipath_portcnt; port++) { | 756 | for (port = 0; port < dd->ipath_portcnt; port++) { |
749 | struct ipath_portdata *pd = dd->ipath_pd[port]; | 757 | struct ipath_portdata *pd = tmp[port]; |
750 | dd->ipath_pd[port] = NULL; | 758 | tmp[port] = NULL; /* debugging paranoia */ |
751 | ipath_free_pddata(dd, pd); | 759 | ipath_free_pddata(dd, pd); |
752 | } | 760 | } |
753 | kfree(dd->ipath_pd); | 761 | kfree(tmp); |
754 | /* | ||
755 | * debuggability, in case some cleanup path tries to use it | ||
756 | * after this | ||
757 | */ | ||
758 | dd->ipath_pd = NULL; | ||
759 | } | 762 | } |
760 | 763 | ||
761 | static void __devexit ipath_remove_one(struct pci_dev *pdev) | 764 | static void __devexit ipath_remove_one(struct pci_dev *pdev) |
@@ -2586,6 +2589,7 @@ int ipath_reset_device(int unit) | |||
2586 | { | 2589 | { |
2587 | int ret, i; | 2590 | int ret, i; |
2588 | struct ipath_devdata *dd = ipath_lookup(unit); | 2591 | struct ipath_devdata *dd = ipath_lookup(unit); |
2592 | unsigned long flags; | ||
2589 | 2593 | ||
2590 | if (!dd) { | 2594 | if (!dd) { |
2591 | ret = -ENODEV; | 2595 | ret = -ENODEV; |
@@ -2611,18 +2615,21 @@ int ipath_reset_device(int unit) | |||
2611 | goto bail; | 2615 | goto bail; |
2612 | } | 2616 | } |
2613 | 2617 | ||
2618 | spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); | ||
2614 | if (dd->ipath_pd) | 2619 | if (dd->ipath_pd) |
2615 | for (i = 1; i < dd->ipath_cfgports; i++) { | 2620 | for (i = 1; i < dd->ipath_cfgports; i++) { |
2616 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { | 2621 | if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) |
2617 | ipath_dbg("unit %u port %d is in use " | 2622 | continue; |
2618 | "(PID %u cmd %s), can't reset\n", | 2623 | spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); |
2619 | unit, i, | 2624 | ipath_dbg("unit %u port %d is in use " |
2620 | pid_nr(dd->ipath_pd[i]->port_pid), | 2625 | "(PID %u cmd %s), can't reset\n", |
2621 | dd->ipath_pd[i]->port_comm); | 2626 | unit, i, |
2622 | ret = -EBUSY; | 2627 | pid_nr(dd->ipath_pd[i]->port_pid), |
2623 | goto bail; | 2628 | dd->ipath_pd[i]->port_comm); |
2624 | } | 2629 | ret = -EBUSY; |
2630 | goto bail; | ||
2625 | } | 2631 | } |
2632 | spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); | ||
2626 | 2633 | ||
2627 | if (dd->ipath_flags & IPATH_HAS_SEND_DMA) | 2634 | if (dd->ipath_flags & IPATH_HAS_SEND_DMA) |
2628 | teardown_sdma(dd); | 2635 | teardown_sdma(dd); |
@@ -2656,9 +2663,12 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig) | |||
2656 | { | 2663 | { |
2657 | int i, sub, any = 0; | 2664 | int i, sub, any = 0; |
2658 | struct pid *pid; | 2665 | struct pid *pid; |
2666 | unsigned long flags; | ||
2659 | 2667 | ||
2660 | if (!dd->ipath_pd) | 2668 | if (!dd->ipath_pd) |
2661 | return 0; | 2669 | return 0; |
2670 | |||
2671 | spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); | ||
2662 | for (i = 1; i < dd->ipath_cfgports; i++) { | 2672 | for (i = 1; i < dd->ipath_cfgports; i++) { |
2663 | if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) | 2673 | if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) |
2664 | continue; | 2674 | continue; |
@@ -2682,6 +2692,7 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig) | |||
2682 | any++; | 2692 | any++; |
2683 | } | 2693 | } |
2684 | } | 2694 | } |
2695 | spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); | ||
2685 | return any; | 2696 | return any; |
2686 | } | 2697 | } |
2687 | 2698 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 1af1f3a907c6..239d4e8068ac 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c | |||
@@ -223,8 +223,13 @@ static int ipath_get_base_info(struct file *fp, | |||
223 | (unsigned long long) kinfo->spi_subport_rcvhdr_base); | 223 | (unsigned long long) kinfo->spi_subport_rcvhdr_base); |
224 | } | 224 | } |
225 | 225 | ||
226 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / | 226 | /* |
227 | dd->ipath_palign; | 227 | * All user buffers are 2KB buffers. If we ever support |
228 | * giving 4KB buffers to user processes, this will need some | ||
229 | * work. | ||
230 | */ | ||
231 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - | ||
232 | (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign; | ||
228 | kinfo->spi_pioalign = dd->ipath_palign; | 233 | kinfo->spi_pioalign = dd->ipath_palign; |
229 | 234 | ||
230 | kinfo->spi_qpair = IPATH_KD_QP; | 235 | kinfo->spi_qpair = IPATH_KD_QP; |
@@ -2041,7 +2046,9 @@ static int ipath_close(struct inode *in, struct file *fp) | |||
2041 | struct ipath_filedata *fd; | 2046 | struct ipath_filedata *fd; |
2042 | struct ipath_portdata *pd; | 2047 | struct ipath_portdata *pd; |
2043 | struct ipath_devdata *dd; | 2048 | struct ipath_devdata *dd; |
2049 | unsigned long flags; | ||
2044 | unsigned port; | 2050 | unsigned port; |
2051 | struct pid *pid; | ||
2045 | 2052 | ||
2046 | ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", | 2053 | ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", |
2047 | (long)in->i_rdev, fp->private_data); | 2054 | (long)in->i_rdev, fp->private_data); |
@@ -2074,14 +2081,13 @@ static int ipath_close(struct inode *in, struct file *fp) | |||
2074 | mutex_unlock(&ipath_mutex); | 2081 | mutex_unlock(&ipath_mutex); |
2075 | goto bail; | 2082 | goto bail; |
2076 | } | 2083 | } |
2084 | /* early; no interrupt users after this */ | ||
2085 | spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); | ||
2077 | port = pd->port_port; | 2086 | port = pd->port_port; |
2078 | 2087 | dd->ipath_pd[port] = NULL; | |
2079 | if (pd->port_hdrqfull) { | 2088 | pid = pd->port_pid; |
2080 | ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " | 2089 | pd->port_pid = NULL; |
2081 | "during run\n", pd->port_comm, pid_nr(pd->port_pid), | 2090 | spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); |
2082 | pd->port_hdrqfull); | ||
2083 | pd->port_hdrqfull = 0; | ||
2084 | } | ||
2085 | 2091 | ||
2086 | if (pd->port_rcvwait_to || pd->port_piowait_to | 2092 | if (pd->port_rcvwait_to || pd->port_piowait_to |
2087 | || pd->port_rcvnowait || pd->port_pionowait) { | 2093 | || pd->port_rcvnowait || pd->port_pionowait) { |
@@ -2138,13 +2144,11 @@ static int ipath_close(struct inode *in, struct file *fp) | |||
2138 | unlock_expected_tids(pd); | 2144 | unlock_expected_tids(pd); |
2139 | ipath_stats.sps_ports--; | 2145 | ipath_stats.sps_ports--; |
2140 | ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", | 2146 | ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", |
2141 | pd->port_comm, pid_nr(pd->port_pid), | 2147 | pd->port_comm, pid_nr(pid), |
2142 | dd->ipath_unit, port); | 2148 | dd->ipath_unit, port); |
2143 | } | 2149 | } |
2144 | 2150 | ||
2145 | put_pid(pd->port_pid); | 2151 | put_pid(pid); |
2146 | pd->port_pid = NULL; | ||
2147 | dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ | ||
2148 | mutex_unlock(&ipath_mutex); | 2152 | mutex_unlock(&ipath_mutex); |
2149 | ipath_free_pddata(dd, pd); /* after releasing the mutex */ | 2153 | ipath_free_pddata(dd, pd); /* after releasing the mutex */ |
2150 | 2154 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 8bb5170b4e41..53912c327bfe 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | |||
@@ -86,7 +86,7 @@ static int create_file(const char *name, mode_t mode, | |||
86 | *dentry = NULL; | 86 | *dentry = NULL; |
87 | mutex_lock(&parent->d_inode->i_mutex); | 87 | mutex_lock(&parent->d_inode->i_mutex); |
88 | *dentry = lookup_one_len(name, parent, strlen(name)); | 88 | *dentry = lookup_one_len(name, parent, strlen(name)); |
89 | if (!IS_ERR(dentry)) | 89 | if (!IS_ERR(*dentry)) |
90 | error = ipathfs_mknod(parent->d_inode, *dentry, | 90 | error = ipathfs_mknod(parent->d_inode, *dentry, |
91 | mode, fops, data); | 91 | mode, fops, data); |
92 | else | 92 | else |
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index 421cc2af891f..fbf8c5379ea8 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c | |||
@@ -721,6 +721,12 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
721 | INFINIPATH_HWE_SERDESPLLFAILED); | 721 | INFINIPATH_HWE_SERDESPLLFAILED); |
722 | } | 722 | } |
723 | 723 | ||
724 | dd->ibdeltainprog = 1; | ||
725 | dd->ibsymsnap = | ||
726 | ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
727 | dd->iblnkerrsnap = | ||
728 | ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
729 | |||
724 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | 730 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); |
725 | config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); | 731 | config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); |
726 | 732 | ||
@@ -810,6 +816,36 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | |||
810 | { | 816 | { |
811 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | 817 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); |
812 | 818 | ||
819 | if (dd->ibsymdelta || dd->iblnkerrdelta || | ||
820 | dd->ibdeltainprog) { | ||
821 | u64 diagc; | ||
822 | /* enable counter writes */ | ||
823 | diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); | ||
824 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, | ||
825 | diagc | INFINIPATH_DC_COUNTERWREN); | ||
826 | |||
827 | if (dd->ibsymdelta || dd->ibdeltainprog) { | ||
828 | val = ipath_read_creg32(dd, | ||
829 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
830 | if (dd->ibdeltainprog) | ||
831 | val -= val - dd->ibsymsnap; | ||
832 | val -= dd->ibsymdelta; | ||
833 | ipath_write_creg(dd, | ||
834 | dd->ipath_cregs->cr_ibsymbolerrcnt, val); | ||
835 | } | ||
836 | if (dd->iblnkerrdelta || dd->ibdeltainprog) { | ||
837 | val = ipath_read_creg32(dd, | ||
838 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
839 | if (dd->ibdeltainprog) | ||
840 | val -= val - dd->iblnkerrsnap; | ||
841 | val -= dd->iblnkerrdelta; | ||
842 | ipath_write_creg(dd, | ||
843 | dd->ipath_cregs->cr_iblinkerrrecovcnt, val); | ||
844 | } | ||
845 | |||
846 | /* and disable counter writes */ | ||
847 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); | ||
848 | } | ||
813 | val |= INFINIPATH_SERDC0_TXIDLE; | 849 | val |= INFINIPATH_SERDC0_TXIDLE; |
814 | ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", | 850 | ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", |
815 | (unsigned long long) val); | 851 | (unsigned long long) val); |
@@ -1749,6 +1785,31 @@ static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b) | |||
1749 | 1785 | ||
1750 | static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | 1786 | static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) |
1751 | { | 1787 | { |
1788 | if (ibup) { | ||
1789 | if (dd->ibdeltainprog) { | ||
1790 | dd->ibdeltainprog = 0; | ||
1791 | dd->ibsymdelta += | ||
1792 | ipath_read_creg32(dd, | ||
1793 | dd->ipath_cregs->cr_ibsymbolerrcnt) - | ||
1794 | dd->ibsymsnap; | ||
1795 | dd->iblnkerrdelta += | ||
1796 | ipath_read_creg32(dd, | ||
1797 | dd->ipath_cregs->cr_iblinkerrrecovcnt) - | ||
1798 | dd->iblnkerrsnap; | ||
1799 | } | ||
1800 | } else { | ||
1801 | dd->ipath_lli_counter = 0; | ||
1802 | if (!dd->ibdeltainprog) { | ||
1803 | dd->ibdeltainprog = 1; | ||
1804 | dd->ibsymsnap = | ||
1805 | ipath_read_creg32(dd, | ||
1806 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
1807 | dd->iblnkerrsnap = | ||
1808 | ipath_read_creg32(dd, | ||
1809 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
1810 | } | ||
1811 | } | ||
1812 | |||
1752 | ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs), | 1813 | ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs), |
1753 | ipath_ib_linktrstate(dd, ibcs)); | 1814 | ipath_ib_linktrstate(dd, ibcs)); |
1754 | return 0; | 1815 | return 0; |
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c index 9839e20119bc..b2a9d4c155d1 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c | |||
@@ -951,6 +951,12 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd) | |||
951 | INFINIPATH_HWE_SERDESPLLFAILED); | 951 | INFINIPATH_HWE_SERDESPLLFAILED); |
952 | } | 952 | } |
953 | 953 | ||
954 | dd->ibdeltainprog = 1; | ||
955 | dd->ibsymsnap = | ||
956 | ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
957 | dd->iblnkerrsnap = | ||
958 | ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
959 | |||
954 | if (!dd->ipath_ibcddrctrl) { | 960 | if (!dd->ipath_ibcddrctrl) { |
955 | /* not on re-init after reset */ | 961 | /* not on re-init after reset */ |
956 | dd->ipath_ibcddrctrl = | 962 | dd->ipath_ibcddrctrl = |
@@ -1084,6 +1090,37 @@ static void ipath_7220_config_jint(struct ipath_devdata *dd, | |||
1084 | static void ipath_7220_quiet_serdes(struct ipath_devdata *dd) | 1090 | static void ipath_7220_quiet_serdes(struct ipath_devdata *dd) |
1085 | { | 1091 | { |
1086 | u64 val; | 1092 | u64 val; |
1093 | if (dd->ibsymdelta || dd->iblnkerrdelta || | ||
1094 | dd->ibdeltainprog) { | ||
1095 | u64 diagc; | ||
1096 | /* enable counter writes */ | ||
1097 | diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); | ||
1098 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, | ||
1099 | diagc | INFINIPATH_DC_COUNTERWREN); | ||
1100 | |||
1101 | if (dd->ibsymdelta || dd->ibdeltainprog) { | ||
1102 | val = ipath_read_creg32(dd, | ||
1103 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
1104 | if (dd->ibdeltainprog) | ||
1105 | val -= val - dd->ibsymsnap; | ||
1106 | val -= dd->ibsymdelta; | ||
1107 | ipath_write_creg(dd, | ||
1108 | dd->ipath_cregs->cr_ibsymbolerrcnt, val); | ||
1109 | } | ||
1110 | if (dd->iblnkerrdelta || dd->ibdeltainprog) { | ||
1111 | val = ipath_read_creg32(dd, | ||
1112 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
1113 | if (dd->ibdeltainprog) | ||
1114 | val -= val - dd->iblnkerrsnap; | ||
1115 | val -= dd->iblnkerrdelta; | ||
1116 | ipath_write_creg(dd, | ||
1117 | dd->ipath_cregs->cr_iblinkerrrecovcnt, val); | ||
1118 | } | ||
1119 | |||
1120 | /* and disable counter writes */ | ||
1121 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); | ||
1122 | } | ||
1123 | |||
1087 | dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; | 1124 | dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; |
1088 | wake_up(&dd->ipath_autoneg_wait); | 1125 | wake_up(&dd->ipath_autoneg_wait); |
1089 | cancel_delayed_work(&dd->ipath_autoneg_work); | 1126 | cancel_delayed_work(&dd->ipath_autoneg_work); |
@@ -2325,7 +2362,7 @@ static void try_auto_neg(struct ipath_devdata *dd) | |||
2325 | 2362 | ||
2326 | static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | 2363 | static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) |
2327 | { | 2364 | { |
2328 | int ret = 0; | 2365 | int ret = 0, symadj = 0; |
2329 | u32 ltstate = ipath_ib_linkstate(dd, ibcs); | 2366 | u32 ltstate = ipath_ib_linkstate(dd, ibcs); |
2330 | 2367 | ||
2331 | dd->ipath_link_width_active = | 2368 | dd->ipath_link_width_active = |
@@ -2368,6 +2405,13 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | |||
2368 | ipath_dbg("DDR negotiation try, %u/%u\n", | 2405 | ipath_dbg("DDR negotiation try, %u/%u\n", |
2369 | dd->ipath_autoneg_tries, | 2406 | dd->ipath_autoneg_tries, |
2370 | IPATH_AUTONEG_TRIES); | 2407 | IPATH_AUTONEG_TRIES); |
2408 | if (!dd->ibdeltainprog) { | ||
2409 | dd->ibdeltainprog = 1; | ||
2410 | dd->ibsymsnap = ipath_read_creg32(dd, | ||
2411 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
2412 | dd->iblnkerrsnap = ipath_read_creg32(dd, | ||
2413 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
2414 | } | ||
2371 | try_auto_neg(dd); | 2415 | try_auto_neg(dd); |
2372 | ret = 1; /* no other IB status change processing */ | 2416 | ret = 1; /* no other IB status change processing */ |
2373 | } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) | 2417 | } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) |
@@ -2388,6 +2432,7 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | |||
2388 | set_speed_fast(dd, | 2432 | set_speed_fast(dd, |
2389 | dd->ipath_link_speed_enabled); | 2433 | dd->ipath_link_speed_enabled); |
2390 | wake_up(&dd->ipath_autoneg_wait); | 2434 | wake_up(&dd->ipath_autoneg_wait); |
2435 | symadj = 1; | ||
2391 | } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) { | 2436 | } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) { |
2392 | /* | 2437 | /* |
2393 | * clear autoneg failure flag, and do setup | 2438 | * clear autoneg failure flag, and do setup |
@@ -2403,22 +2448,28 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | |||
2403 | IBA7220_IBC_IBTA_1_2_MASK; | 2448 | IBA7220_IBC_IBTA_1_2_MASK; |
2404 | ipath_write_kreg(dd, | 2449 | ipath_write_kreg(dd, |
2405 | IPATH_KREG_OFFSET(IBNCModeCtrl), 0); | 2450 | IPATH_KREG_OFFSET(IBNCModeCtrl), 0); |
2451 | symadj = 1; | ||
2406 | } | 2452 | } |
2407 | } | 2453 | } |
2408 | /* | 2454 | /* |
2409 | * if we are in 1X, and are in autoneg width, it | 2455 | * if we are in 1X on rev1 only, and are in autoneg width, |
2410 | * could be due to an xgxs problem, so if we haven't | 2456 | * it could be due to an xgxs problem, so if we haven't |
2411 | * already tried, try twice to get to 4X; if we | 2457 | * already tried, try twice to get to 4X; if we |
2412 | * tried, and couldn't, report it, since it will | 2458 | * tried, and couldn't, report it, since it will |
2413 | * probably not be what is desired. | 2459 | * probably not be what is desired. |
2414 | */ | 2460 | */ |
2415 | if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X | | 2461 | if (dd->ipath_minrev == 1 && |
2462 | (dd->ipath_link_width_enabled & (IB_WIDTH_1X | | ||
2416 | IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X) | 2463 | IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X) |
2417 | && dd->ipath_link_width_active == IB_WIDTH_1X | 2464 | && dd->ipath_link_width_active == IB_WIDTH_1X |
2418 | && dd->ipath_x1_fix_tries < 3) { | 2465 | && dd->ipath_x1_fix_tries < 3) { |
2419 | if (++dd->ipath_x1_fix_tries == 3) | 2466 | if (++dd->ipath_x1_fix_tries == 3) { |
2420 | dev_info(&dd->pcidev->dev, | 2467 | dev_info(&dd->pcidev->dev, |
2421 | "IB link is in 1X mode\n"); | 2468 | "IB link is in 1X mode\n"); |
2469 | if (!(dd->ipath_flags & | ||
2470 | IPATH_IB_AUTONEG_INPROG)) | ||
2471 | symadj = 1; | ||
2472 | } | ||
2422 | else { | 2473 | else { |
2423 | ipath_cdbg(VERBOSE, "IB 1X in " | 2474 | ipath_cdbg(VERBOSE, "IB 1X in " |
2424 | "auto-width, try %u to be " | 2475 | "auto-width, try %u to be " |
@@ -2429,7 +2480,8 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | |||
2429 | dd->ipath_f_xgxs_reset(dd); | 2480 | dd->ipath_f_xgxs_reset(dd); |
2430 | ret = 1; /* skip other processing */ | 2481 | ret = 1; /* skip other processing */ |
2431 | } | 2482 | } |
2432 | } | 2483 | } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) |
2484 | symadj = 1; | ||
2433 | 2485 | ||
2434 | if (!ret) { | 2486 | if (!ret) { |
2435 | dd->delay_mult = rate_to_delay | 2487 | dd->delay_mult = rate_to_delay |
@@ -2440,6 +2492,25 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | |||
2440 | } | 2492 | } |
2441 | } | 2493 | } |
2442 | 2494 | ||
2495 | if (symadj) { | ||
2496 | if (dd->ibdeltainprog) { | ||
2497 | dd->ibdeltainprog = 0; | ||
2498 | dd->ibsymdelta += ipath_read_creg32(dd, | ||
2499 | dd->ipath_cregs->cr_ibsymbolerrcnt) - | ||
2500 | dd->ibsymsnap; | ||
2501 | dd->iblnkerrdelta += ipath_read_creg32(dd, | ||
2502 | dd->ipath_cregs->cr_iblinkerrrecovcnt) - | ||
2503 | dd->iblnkerrsnap; | ||
2504 | } | ||
2505 | } else if (!ibup && !dd->ibdeltainprog | ||
2506 | && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { | ||
2507 | dd->ibdeltainprog = 1; | ||
2508 | dd->ibsymsnap = ipath_read_creg32(dd, | ||
2509 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
2510 | dd->iblnkerrsnap = ipath_read_creg32(dd, | ||
2511 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
2512 | } | ||
2513 | |||
2443 | if (!ret) | 2514 | if (!ret) |
2444 | ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs), | 2515 | ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs), |
2445 | ltstate); | 2516 | ltstate); |
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 3e5baa43fc82..64aeefbd2a5d 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c | |||
@@ -229,6 +229,7 @@ static int init_chip_first(struct ipath_devdata *dd) | |||
229 | spin_lock_init(&dd->ipath_kernel_tid_lock); | 229 | spin_lock_init(&dd->ipath_kernel_tid_lock); |
230 | spin_lock_init(&dd->ipath_user_tid_lock); | 230 | spin_lock_init(&dd->ipath_user_tid_lock); |
231 | spin_lock_init(&dd->ipath_sendctrl_lock); | 231 | spin_lock_init(&dd->ipath_sendctrl_lock); |
232 | spin_lock_init(&dd->ipath_uctxt_lock); | ||
232 | spin_lock_init(&dd->ipath_sdma_lock); | 233 | spin_lock_init(&dd->ipath_sdma_lock); |
233 | spin_lock_init(&dd->ipath_gpio_lock); | 234 | spin_lock_init(&dd->ipath_gpio_lock); |
234 | spin_lock_init(&dd->ipath_eep_st_lock); | 235 | spin_lock_init(&dd->ipath_eep_st_lock); |
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 0bd8bcb184a1..6ba4861dd6ac 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -355,6 +355,19 @@ struct ipath_devdata { | |||
355 | /* errors masked because they occur too fast */ | 355 | /* errors masked because they occur too fast */ |
356 | ipath_err_t ipath_maskederrs; | 356 | ipath_err_t ipath_maskederrs; |
357 | u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */ | 357 | u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */ |
358 | /* these 5 fields are used to establish deltas for IB Symbol | ||
359 | * errors and linkrecovery errors. They can be reported on | ||
360 | * some chips during link negotiation prior to INIT, and with | ||
361 | * DDR when faking DDR negotiations with non-IBTA switches. | ||
362 | * The chip counters are adjusted at driver unload if there is | ||
363 | * a non-zero delta. | ||
364 | */ | ||
365 | u64 ibdeltainprog; | ||
366 | u64 ibsymdelta; | ||
367 | u64 ibsymsnap; | ||
368 | u64 iblnkerrdelta; | ||
369 | u64 iblnkerrsnap; | ||
370 | |||
358 | /* time in jiffies at which to re-enable maskederrs */ | 371 | /* time in jiffies at which to re-enable maskederrs */ |
359 | unsigned long ipath_unmasktime; | 372 | unsigned long ipath_unmasktime; |
360 | /* count of egrfull errors, combined for all ports */ | 373 | /* count of egrfull errors, combined for all ports */ |
@@ -464,6 +477,8 @@ struct ipath_devdata { | |||
464 | spinlock_t ipath_kernel_tid_lock; | 477 | spinlock_t ipath_kernel_tid_lock; |
465 | spinlock_t ipath_user_tid_lock; | 478 | spinlock_t ipath_user_tid_lock; |
466 | spinlock_t ipath_sendctrl_lock; | 479 | spinlock_t ipath_sendctrl_lock; |
480 | /* around ipath_pd and (user ports) port_cnt use (intr vs free) */ | ||
481 | spinlock_t ipath_uctxt_lock; | ||
467 | 482 | ||
468 | /* | 483 | /* |
469 | * IPATH_STATUS_*, | 484 | * IPATH_STATUS_*, |
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index 8f32b17a5eed..c0e933fec218 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -132,6 +132,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, | |||
132 | * (see ipath_get_dma_mr and ipath_dma.c). | 132 | * (see ipath_get_dma_mr and ipath_dma.c). |
133 | */ | 133 | */ |
134 | if (sge->lkey == 0) { | 134 | if (sge->lkey == 0) { |
135 | /* always a kernel port, no locking needed */ | ||
135 | struct ipath_pd *pd = to_ipd(qp->ibqp.pd); | 136 | struct ipath_pd *pd = to_ipd(qp->ibqp.pd); |
136 | 137 | ||
137 | if (pd->user) { | 138 | if (pd->user) { |
@@ -211,6 +212,7 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, | |||
211 | * (see ipath_get_dma_mr and ipath_dma.c). | 212 | * (see ipath_get_dma_mr and ipath_dma.c). |
212 | */ | 213 | */ |
213 | if (rkey == 0) { | 214 | if (rkey == 0) { |
215 | /* always a kernel port, no locking needed */ | ||
214 | struct ipath_pd *pd = to_ipd(qp->ibqp.pd); | 216 | struct ipath_pd *pd = to_ipd(qp->ibqp.pd); |
215 | 217 | ||
216 | if (pd->user) { | 218 | if (pd->user) { |
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index be4fc9ada8e7..17a123197477 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c | |||
@@ -348,6 +348,7 @@ bail: | |||
348 | */ | 348 | */ |
349 | static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) | 349 | static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) |
350 | { | 350 | { |
351 | /* always a kernel port, no locking needed */ | ||
351 | struct ipath_portdata *pd = dd->ipath_pd[0]; | 352 | struct ipath_portdata *pd = dd->ipath_pd[0]; |
352 | 353 | ||
353 | memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); | 354 | memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); |
@@ -730,6 +731,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) | |||
730 | int i; | 731 | int i; |
731 | int changed = 0; | 732 | int changed = 0; |
732 | 733 | ||
734 | /* always a kernel port, no locking needed */ | ||
733 | pd = dd->ipath_pd[0]; | 735 | pd = dd->ipath_pd[0]; |
734 | 736 | ||
735 | for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { | 737 | for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { |
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 4715911101e4..3a5a89b609c4 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c | |||
@@ -745,6 +745,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
745 | struct ipath_swqe *swq = NULL; | 745 | struct ipath_swqe *swq = NULL; |
746 | struct ipath_ibdev *dev; | 746 | struct ipath_ibdev *dev; |
747 | size_t sz; | 747 | size_t sz; |
748 | size_t sg_list_sz; | ||
748 | struct ib_qp *ret; | 749 | struct ib_qp *ret; |
749 | 750 | ||
750 | if (init_attr->create_flags) { | 751 | if (init_attr->create_flags) { |
@@ -789,19 +790,31 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
789 | goto bail; | 790 | goto bail; |
790 | } | 791 | } |
791 | sz = sizeof(*qp); | 792 | sz = sizeof(*qp); |
793 | sg_list_sz = 0; | ||
792 | if (init_attr->srq) { | 794 | if (init_attr->srq) { |
793 | struct ipath_srq *srq = to_isrq(init_attr->srq); | 795 | struct ipath_srq *srq = to_isrq(init_attr->srq); |
794 | 796 | ||
795 | sz += sizeof(*qp->r_sg_list) * | 797 | if (srq->rq.max_sge > 1) |
796 | srq->rq.max_sge; | 798 | sg_list_sz = sizeof(*qp->r_sg_list) * |
797 | } else | 799 | (srq->rq.max_sge - 1); |
798 | sz += sizeof(*qp->r_sg_list) * | 800 | } else if (init_attr->cap.max_recv_sge > 1) |
799 | init_attr->cap.max_recv_sge; | 801 | sg_list_sz = sizeof(*qp->r_sg_list) * |
800 | qp = kmalloc(sz, GFP_KERNEL); | 802 | (init_attr->cap.max_recv_sge - 1); |
803 | qp = kmalloc(sz + sg_list_sz, GFP_KERNEL); | ||
801 | if (!qp) { | 804 | if (!qp) { |
802 | ret = ERR_PTR(-ENOMEM); | 805 | ret = ERR_PTR(-ENOMEM); |
803 | goto bail_swq; | 806 | goto bail_swq; |
804 | } | 807 | } |
808 | if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD || | ||
809 | init_attr->qp_type == IB_QPT_SMI || | ||
810 | init_attr->qp_type == IB_QPT_GSI)) { | ||
811 | qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL); | ||
812 | if (!qp->r_ud_sg_list) { | ||
813 | ret = ERR_PTR(-ENOMEM); | ||
814 | goto bail_qp; | ||
815 | } | ||
816 | } else | ||
817 | qp->r_ud_sg_list = NULL; | ||
805 | if (init_attr->srq) { | 818 | if (init_attr->srq) { |
806 | sz = 0; | 819 | sz = 0; |
807 | qp->r_rq.size = 0; | 820 | qp->r_rq.size = 0; |
@@ -818,7 +831,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
818 | qp->r_rq.size * sz); | 831 | qp->r_rq.size * sz); |
819 | if (!qp->r_rq.wq) { | 832 | if (!qp->r_rq.wq) { |
820 | ret = ERR_PTR(-ENOMEM); | 833 | ret = ERR_PTR(-ENOMEM); |
821 | goto bail_qp; | 834 | goto bail_sg_list; |
822 | } | 835 | } |
823 | } | 836 | } |
824 | 837 | ||
@@ -848,7 +861,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
848 | if (err) { | 861 | if (err) { |
849 | ret = ERR_PTR(err); | 862 | ret = ERR_PTR(err); |
850 | vfree(qp->r_rq.wq); | 863 | vfree(qp->r_rq.wq); |
851 | goto bail_qp; | 864 | goto bail_sg_list; |
852 | } | 865 | } |
853 | qp->ip = NULL; | 866 | qp->ip = NULL; |
854 | qp->s_tx = NULL; | 867 | qp->s_tx = NULL; |
@@ -925,6 +938,8 @@ bail_ip: | |||
925 | vfree(qp->r_rq.wq); | 938 | vfree(qp->r_rq.wq); |
926 | ipath_free_qp(&dev->qp_table, qp); | 939 | ipath_free_qp(&dev->qp_table, qp); |
927 | free_qpn(&dev->qp_table, qp->ibqp.qp_num); | 940 | free_qpn(&dev->qp_table, qp->ibqp.qp_num); |
941 | bail_sg_list: | ||
942 | kfree(qp->r_ud_sg_list); | ||
928 | bail_qp: | 943 | bail_qp: |
929 | kfree(qp); | 944 | kfree(qp); |
930 | bail_swq: | 945 | bail_swq: |
@@ -989,6 +1004,7 @@ int ipath_destroy_qp(struct ib_qp *ibqp) | |||
989 | kref_put(&qp->ip->ref, ipath_release_mmap_info); | 1004 | kref_put(&qp->ip->ref, ipath_release_mmap_info); |
990 | else | 1005 | else |
991 | vfree(qp->r_rq.wq); | 1006 | vfree(qp->r_rq.wq); |
1007 | kfree(qp->r_ud_sg_list); | ||
992 | vfree(qp->s_wq); | 1008 | vfree(qp->s_wq); |
993 | kfree(qp); | 1009 | kfree(qp); |
994 | return 0; | 1010 | return 0; |
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 7b93cda1a4bd..9170710b950d 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -573,9 +573,8 @@ int ipath_make_rc_req(struct ipath_qp *qp) | |||
573 | ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); | 573 | ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); |
574 | qp->s_state = OP(RDMA_READ_REQUEST); | 574 | qp->s_state = OP(RDMA_READ_REQUEST); |
575 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 575 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
576 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; | 576 | bth2 = qp->s_psn & IPATH_PSN_MASK; |
577 | if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) | 577 | qp->s_psn = wqe->lpsn + 1; |
578 | qp->s_next_psn = qp->s_psn; | ||
579 | ss = NULL; | 578 | ss = NULL; |
580 | len = 0; | 579 | len = 0; |
581 | qp->s_cur++; | 580 | qp->s_cur++; |
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c index 284c9bca517e..8e255adf5d9b 100644 --- a/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_sdma.c | |||
@@ -698,10 +698,8 @@ retry: | |||
698 | 698 | ||
699 | addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, | 699 | addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, |
700 | tx->map_len, DMA_TO_DEVICE); | 700 | tx->map_len, DMA_TO_DEVICE); |
701 | if (dma_mapping_error(&dd->pcidev->dev, addr)) { | 701 | if (dma_mapping_error(&dd->pcidev->dev, addr)) |
702 | ret = -EIO; | 702 | goto ioerr; |
703 | goto unlock; | ||
704 | } | ||
705 | 703 | ||
706 | dwoffset = tx->map_len >> 2; | 704 | dwoffset = tx->map_len >> 2; |
707 | make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0); | 705 | make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0); |
@@ -741,6 +739,8 @@ retry: | |||
741 | dw = (len + 3) >> 2; | 739 | dw = (len + 3) >> 2; |
742 | addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2, | 740 | addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2, |
743 | DMA_TO_DEVICE); | 741 | DMA_TO_DEVICE); |
742 | if (dma_mapping_error(&dd->pcidev->dev, addr)) | ||
743 | goto unmap; | ||
744 | make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset); | 744 | make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset); |
745 | /* SDmaUseLargeBuf has to be set in every descriptor */ | 745 | /* SDmaUseLargeBuf has to be set in every descriptor */ |
746 | if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) | 746 | if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) |
@@ -798,7 +798,18 @@ retry: | |||
798 | list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); | 798 | list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); |
799 | if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15) | 799 | if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15) |
800 | vl15_watchdog_enq(dd); | 800 | vl15_watchdog_enq(dd); |
801 | 801 | goto unlock; | |
802 | |||
803 | unmap: | ||
804 | while (tail != dd->ipath_sdma_descq_tail) { | ||
805 | if (!tail) | ||
806 | tail = dd->ipath_sdma_descq_cnt - 1; | ||
807 | else | ||
808 | tail--; | ||
809 | unmap_desc(dd, tail); | ||
810 | } | ||
811 | ioerr: | ||
812 | ret = -EIO; | ||
802 | unlock: | 813 | unlock: |
803 | spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); | 814 | spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); |
804 | fail: | 815 | fail: |
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c index c8e3d65f0de8..f63e143e3292 100644 --- a/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/drivers/infiniband/hw/ipath/ipath_stats.c | |||
@@ -112,6 +112,14 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg) | |||
112 | dd->ipath_lastrpkts = val; | 112 | dd->ipath_lastrpkts = val; |
113 | } | 113 | } |
114 | val64 = dd->ipath_rpkts; | 114 | val64 = dd->ipath_rpkts; |
115 | } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) { | ||
116 | if (dd->ibdeltainprog) | ||
117 | val64 -= val64 - dd->ibsymsnap; | ||
118 | val64 -= dd->ibsymdelta; | ||
119 | } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) { | ||
120 | if (dd->ibdeltainprog) | ||
121 | val64 -= val64 - dd->iblnkerrsnap; | ||
122 | val64 -= dd->iblnkerrdelta; | ||
115 | } else | 123 | } else |
116 | val64 = (u64) val; | 124 | val64 = (u64) val; |
117 | 125 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 729446f56aab..91c74cc797ae 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -70,8 +70,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
70 | goto done; | 70 | goto done; |
71 | } | 71 | } |
72 | 72 | ||
73 | rsge.sg_list = NULL; | ||
74 | |||
75 | /* | 73 | /* |
76 | * Check that the qkey matches (except for QP0, see 9.6.1.4.1). | 74 | * Check that the qkey matches (except for QP0, see 9.6.1.4.1). |
77 | * Qkeys with the high order bit set mean use the | 75 | * Qkeys with the high order bit set mean use the |
@@ -115,21 +113,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
115 | rq = &qp->r_rq; | 113 | rq = &qp->r_rq; |
116 | } | 114 | } |
117 | 115 | ||
118 | if (rq->max_sge > 1) { | ||
119 | /* | ||
120 | * XXX We could use GFP_KERNEL if ipath_do_send() | ||
121 | * was always called from the tasklet instead of | ||
122 | * from ipath_post_send(). | ||
123 | */ | ||
124 | rsge.sg_list = kmalloc((rq->max_sge - 1) * | ||
125 | sizeof(struct ipath_sge), | ||
126 | GFP_ATOMIC); | ||
127 | if (!rsge.sg_list) { | ||
128 | dev->n_pkt_drops++; | ||
129 | goto drop; | ||
130 | } | ||
131 | } | ||
132 | |||
133 | /* | 116 | /* |
134 | * Get the next work request entry to find where to put the data. | 117 | * Get the next work request entry to find where to put the data. |
135 | * Note that it is safe to drop the lock after changing rq->tail | 118 | * Note that it is safe to drop the lock after changing rq->tail |
@@ -147,6 +130,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
147 | goto drop; | 130 | goto drop; |
148 | } | 131 | } |
149 | wqe = get_rwqe_ptr(rq, tail); | 132 | wqe = get_rwqe_ptr(rq, tail); |
133 | rsge.sg_list = qp->r_ud_sg_list; | ||
150 | if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { | 134 | if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { |
151 | spin_unlock_irqrestore(&rq->lock, flags); | 135 | spin_unlock_irqrestore(&rq->lock, flags); |
152 | dev->n_pkt_drops++; | 136 | dev->n_pkt_drops++; |
@@ -242,7 +226,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
242 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 226 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
243 | swqe->wr.send_flags & IB_SEND_SOLICITED); | 227 | swqe->wr.send_flags & IB_SEND_SOLICITED); |
244 | drop: | 228 | drop: |
245 | kfree(rsge.sg_list); | ||
246 | if (atomic_dec_and_test(&qp->refcount)) | 229 | if (atomic_dec_and_test(&qp->refcount)) |
247 | wake_up(&qp->wait); | 230 | wake_up(&qp->wait); |
248 | done:; | 231 | done:; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index eabc4247860b..cdf0e6abd34d 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -1852,7 +1852,7 @@ unsigned ipath_get_npkeys(struct ipath_devdata *dd) | |||
1852 | } | 1852 | } |
1853 | 1853 | ||
1854 | /** | 1854 | /** |
1855 | * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table | 1855 | * ipath_get_pkey - return the indexed PKEY from the port PKEY table |
1856 | * @dd: the infinipath device | 1856 | * @dd: the infinipath device |
1857 | * @index: the PKEY index | 1857 | * @index: the PKEY index |
1858 | */ | 1858 | */ |
@@ -1860,6 +1860,7 @@ unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index) | |||
1860 | { | 1860 | { |
1861 | unsigned ret; | 1861 | unsigned ret; |
1862 | 1862 | ||
1863 | /* always a kernel port, no locking needed */ | ||
1863 | if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) | 1864 | if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) |
1864 | ret = 0; | 1865 | ret = 0; |
1865 | else | 1866 | else |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 9d12ae8a778e..11e3f613df93 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -431,6 +431,7 @@ struct ipath_qp { | |||
431 | u32 s_lsn; /* limit sequence number (credit) */ | 431 | u32 s_lsn; /* limit sequence number (credit) */ |
432 | struct ipath_swqe *s_wq; /* send work queue */ | 432 | struct ipath_swqe *s_wq; /* send work queue */ |
433 | struct ipath_swqe *s_wqe; | 433 | struct ipath_swqe *s_wqe; |
434 | struct ipath_sge *r_ud_sg_list; | ||
434 | struct ipath_rq r_rq; /* receive work queue */ | 435 | struct ipath_rq r_rq; /* receive work queue */ |
435 | struct ipath_sge r_sg_list[0]; /* verified SGEs */ | 436 | struct ipath_sge r_sg_list[0]; /* verified SGEs */ |
436 | }; | 437 | }; |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 18308494a195..8415ecce5c4c 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -222,7 +222,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector | |||
222 | } | 222 | } |
223 | 223 | ||
224 | err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, | 224 | err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, |
225 | cq->db.dma, &cq->mcq, 0); | 225 | cq->db.dma, &cq->mcq, vector, 0); |
226 | if (err) | 226 | if (err) |
227 | goto err_dbmap; | 227 | goto err_dbmap; |
228 | 228 | ||
@@ -325,15 +325,17 @@ static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) | |||
325 | 325 | ||
326 | static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) | 326 | static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) |
327 | { | 327 | { |
328 | struct mlx4_cqe *cqe; | 328 | struct mlx4_cqe *cqe, *new_cqe; |
329 | int i; | 329 | int i; |
330 | 330 | ||
331 | i = cq->mcq.cons_index; | 331 | i = cq->mcq.cons_index; |
332 | cqe = get_cqe(cq, i & cq->ibcq.cqe); | 332 | cqe = get_cqe(cq, i & cq->ibcq.cqe); |
333 | while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) { | 333 | while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) { |
334 | memcpy(get_cqe_from_buf(&cq->resize_buf->buf, | 334 | new_cqe = get_cqe_from_buf(&cq->resize_buf->buf, |
335 | (i + 1) & cq->resize_buf->cqe), | 335 | (i + 1) & cq->resize_buf->cqe); |
336 | get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe)); | 336 | memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe)); |
337 | new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) | | ||
338 | (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0); | ||
337 | cqe = get_cqe(cq, ++i & cq->ibcq.cqe); | 339 | cqe = get_cqe(cq, ++i & cq->ibcq.cqe); |
338 | } | 340 | } |
339 | ++cq->mcq.cons_index; | 341 | ++cq->mcq.cons_index; |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2e80f8f47b02..dcefe1fceb5c 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -578,7 +578,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
578 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | 578 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) |
579 | ibdev->num_ports++; | 579 | ibdev->num_ports++; |
580 | ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; | 580 | ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; |
581 | ibdev->ib_dev.num_comp_vectors = 1; | 581 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; |
582 | ibdev->ib_dev.dma_device = &dev->pdev->dev; | 582 | ibdev->ib_dev.dma_device = &dev->pdev->dev; |
583 | 583 | ||
584 | ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; | 584 | ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 1595dc7bba9d..13a5bb1a7bcf 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -137,14 +137,18 @@ | |||
137 | 137 | ||
138 | #ifdef CONFIG_INFINIBAND_NES_DEBUG | 138 | #ifdef CONFIG_INFINIBAND_NES_DEBUG |
139 | #define nes_debug(level, fmt, args...) \ | 139 | #define nes_debug(level, fmt, args...) \ |
140 | do { \ | ||
140 | if (level & nes_debug_level) \ | 141 | if (level & nes_debug_level) \ |
141 | printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args) | 142 | printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args); \ |
142 | 143 | } while (0) | |
143 | #define assert(expr) \ | 144 | |
144 | if (!(expr)) { \ | 145 | #define assert(expr) \ |
145 | printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ | 146 | do { \ |
146 | #expr, __FILE__, __func__, __LINE__); \ | 147 | if (!(expr)) { \ |
147 | } | 148 | printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ |
149 | #expr, __FILE__, __func__, __LINE__); \ | ||
150 | } \ | ||
151 | } while (0) | ||
148 | 152 | ||
149 | #define NES_EVENT_TIMEOUT 1200000 | 153 | #define NES_EVENT_TIMEOUT 1200000 |
150 | #else | 154 | #else |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 2854a6f7fdfe..a812db243477 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -86,15 +86,14 @@ static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, | |||
86 | struct nes_cm_node *); | 86 | struct nes_cm_node *); |
87 | static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, | 87 | static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, |
88 | struct nes_cm_node *); | 88 | struct nes_cm_node *); |
89 | static void mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, | 89 | static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, |
90 | struct sk_buff *); | 90 | struct sk_buff *); |
91 | static int mini_cm_dealloc_core(struct nes_cm_core *); | 91 | static int mini_cm_dealloc_core(struct nes_cm_core *); |
92 | static int mini_cm_get(struct nes_cm_core *); | 92 | static int mini_cm_get(struct nes_cm_core *); |
93 | static int mini_cm_set(struct nes_cm_core *, u32, u32); | 93 | static int mini_cm_set(struct nes_cm_core *, u32, u32); |
94 | 94 | ||
95 | static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *, | 95 | static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, |
96 | void *, u32, void *, u32, u8); | 96 | void *, u32, void *, u32, u8); |
97 | static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node); | ||
98 | static int add_ref_cm_node(struct nes_cm_node *); | 97 | static int add_ref_cm_node(struct nes_cm_node *); |
99 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); | 98 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); |
100 | 99 | ||
@@ -251,7 +250,7 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len) | |||
251 | * form_cm_frame - get a free packet and build empty frame Use | 250 | * form_cm_frame - get a free packet and build empty frame Use |
252 | * node info to build. | 251 | * node info to build. |
253 | */ | 252 | */ |
254 | static struct sk_buff *form_cm_frame(struct sk_buff *skb, | 253 | static void form_cm_frame(struct sk_buff *skb, |
255 | struct nes_cm_node *cm_node, void *options, u32 optionsize, | 254 | struct nes_cm_node *cm_node, void *options, u32 optionsize, |
256 | void *data, u32 datasize, u8 flags) | 255 | void *data, u32 datasize, u8 flags) |
257 | { | 256 | { |
@@ -339,7 +338,6 @@ static struct sk_buff *form_cm_frame(struct sk_buff *skb, | |||
339 | skb_shinfo(skb)->nr_frags = 0; | 338 | skb_shinfo(skb)->nr_frags = 0; |
340 | cm_packets_created++; | 339 | cm_packets_created++; |
341 | 340 | ||
342 | return skb; | ||
343 | } | 341 | } |
344 | 342 | ||
345 | 343 | ||
@@ -356,7 +354,6 @@ static void print_core(struct nes_cm_core *core) | |||
356 | 354 | ||
357 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); | 355 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); |
358 | 356 | ||
359 | nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list)); | ||
360 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); | 357 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); |
361 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); | 358 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); |
362 | 359 | ||
@@ -381,8 +378,6 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
381 | int ret = 0; | 378 | int ret = 0; |
382 | u32 was_timer_set; | 379 | u32 was_timer_set; |
383 | 380 | ||
384 | if (!cm_node) | ||
385 | return -EINVAL; | ||
386 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); | 381 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); |
387 | if (!new_send) | 382 | if (!new_send) |
388 | return -1; | 383 | return -1; |
@@ -459,13 +454,23 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
459 | int ret = NETDEV_TX_OK; | 454 | int ret = NETDEV_TX_OK; |
460 | enum nes_cm_node_state last_state; | 455 | enum nes_cm_node_state last_state; |
461 | 456 | ||
457 | struct list_head timer_list; | ||
458 | INIT_LIST_HEAD(&timer_list); | ||
462 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 459 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
463 | 460 | ||
464 | list_for_each_safe(list_node, list_core_temp, | 461 | list_for_each_safe(list_node, list_core_temp, |
465 | &cm_core->connected_nodes) { | 462 | &cm_core->connected_nodes) { |
466 | cm_node = container_of(list_node, struct nes_cm_node, list); | 463 | cm_node = container_of(list_node, struct nes_cm_node, list); |
467 | add_ref_cm_node(cm_node); | 464 | if (!list_empty(&cm_node->recv_list) || (cm_node->send_entry)) { |
468 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | 465 | add_ref_cm_node(cm_node); |
466 | list_add(&cm_node->timer_entry, &timer_list); | ||
467 | } | ||
468 | } | ||
469 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
470 | |||
471 | list_for_each_safe(list_node, list_core_temp, &timer_list) { | ||
472 | cm_node = container_of(list_node, struct nes_cm_node, | ||
473 | timer_entry); | ||
469 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | 474 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); |
470 | list_for_each_safe(list_core, list_node_temp, | 475 | list_for_each_safe(list_core, list_node_temp, |
471 | &cm_node->recv_list) { | 476 | &cm_node->recv_list) { |
@@ -519,7 +524,7 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
519 | do { | 524 | do { |
520 | send_entry = cm_node->send_entry; | 525 | send_entry = cm_node->send_entry; |
521 | if (!send_entry) | 526 | if (!send_entry) |
522 | continue; | 527 | break; |
523 | if (time_after(send_entry->timetosend, jiffies)) { | 528 | if (time_after(send_entry->timetosend, jiffies)) { |
524 | if (cm_node->state != NES_CM_STATE_TSA) { | 529 | if (cm_node->state != NES_CM_STATE_TSA) { |
525 | if ((nexttimeout > | 530 | if ((nexttimeout > |
@@ -528,18 +533,18 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
528 | nexttimeout = | 533 | nexttimeout = |
529 | send_entry->timetosend; | 534 | send_entry->timetosend; |
530 | settimer = 1; | 535 | settimer = 1; |
531 | continue; | 536 | break; |
532 | } | 537 | } |
533 | } else { | 538 | } else { |
534 | free_retrans_entry(cm_node); | 539 | free_retrans_entry(cm_node); |
535 | continue; | 540 | break; |
536 | } | 541 | } |
537 | } | 542 | } |
538 | 543 | ||
539 | if ((cm_node->state == NES_CM_STATE_TSA) || | 544 | if ((cm_node->state == NES_CM_STATE_TSA) || |
540 | (cm_node->state == NES_CM_STATE_CLOSED)) { | 545 | (cm_node->state == NES_CM_STATE_CLOSED)) { |
541 | free_retrans_entry(cm_node); | 546 | free_retrans_entry(cm_node); |
542 | continue; | 547 | break; |
543 | } | 548 | } |
544 | 549 | ||
545 | if (!send_entry->retranscount || | 550 | if (!send_entry->retranscount || |
@@ -557,7 +562,7 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
557 | NES_CM_EVENT_ABORTED); | 562 | NES_CM_EVENT_ABORTED); |
558 | spin_lock_irqsave(&cm_node->retrans_list_lock, | 563 | spin_lock_irqsave(&cm_node->retrans_list_lock, |
559 | flags); | 564 | flags); |
560 | continue; | 565 | break; |
561 | } | 566 | } |
562 | atomic_inc(&send_entry->skb->users); | 567 | atomic_inc(&send_entry->skb->users); |
563 | cm_packets_retrans++; | 568 | cm_packets_retrans++; |
@@ -583,7 +588,7 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
583 | send_entry->retrycount--; | 588 | send_entry->retrycount--; |
584 | nexttimeout = jiffies + NES_SHORT_TIME; | 589 | nexttimeout = jiffies + NES_SHORT_TIME; |
585 | settimer = 1; | 590 | settimer = 1; |
586 | continue; | 591 | break; |
587 | } else { | 592 | } else { |
588 | cm_packets_sent++; | 593 | cm_packets_sent++; |
589 | } | 594 | } |
@@ -615,14 +620,12 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
615 | 620 | ||
616 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | 621 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); |
617 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 622 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
618 | spin_lock_irqsave(&cm_core->ht_lock, flags); | ||
619 | if (ret != NETDEV_TX_OK) { | 623 | if (ret != NETDEV_TX_OK) { |
620 | nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n", | 624 | nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n", |
621 | cm_node); | 625 | cm_node); |
622 | break; | 626 | break; |
623 | } | 627 | } |
624 | } | 628 | } |
625 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
626 | 629 | ||
627 | if (settimer) { | 630 | if (settimer) { |
628 | if (!timer_pending(&cm_core->tcp_timer)) { | 631 | if (!timer_pending(&cm_core->tcp_timer)) { |
@@ -683,7 +686,7 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack, | |||
683 | optionssize += 1; | 686 | optionssize += 1; |
684 | 687 | ||
685 | if (!skb) | 688 | if (!skb) |
686 | skb = get_free_pkt(cm_node); | 689 | skb = dev_alloc_skb(MAX_CM_BUFFER); |
687 | if (!skb) { | 690 | if (!skb) { |
688 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | 691 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); |
689 | return -1; | 692 | return -1; |
@@ -708,7 +711,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
708 | int flags = SET_RST | SET_ACK; | 711 | int flags = SET_RST | SET_ACK; |
709 | 712 | ||
710 | if (!skb) | 713 | if (!skb) |
711 | skb = get_free_pkt(cm_node); | 714 | skb = dev_alloc_skb(MAX_CM_BUFFER); |
712 | if (!skb) { | 715 | if (!skb) { |
713 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | 716 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); |
714 | return -1; | 717 | return -1; |
@@ -729,7 +732,7 @@ static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
729 | int ret; | 732 | int ret; |
730 | 733 | ||
731 | if (!skb) | 734 | if (!skb) |
732 | skb = get_free_pkt(cm_node); | 735 | skb = dev_alloc_skb(MAX_CM_BUFFER); |
733 | 736 | ||
734 | if (!skb) { | 737 | if (!skb) { |
735 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | 738 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); |
@@ -752,7 +755,7 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
752 | 755 | ||
753 | /* if we didn't get a frame get one */ | 756 | /* if we didn't get a frame get one */ |
754 | if (!skb) | 757 | if (!skb) |
755 | skb = get_free_pkt(cm_node); | 758 | skb = dev_alloc_skb(MAX_CM_BUFFER); |
756 | 759 | ||
757 | if (!skb) { | 760 | if (!skb) { |
758 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | 761 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); |
@@ -767,59 +770,15 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
767 | 770 | ||
768 | 771 | ||
769 | /** | 772 | /** |
770 | * get_free_pkt | ||
771 | */ | ||
772 | static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node) | ||
773 | { | ||
774 | struct sk_buff *skb, *new_skb; | ||
775 | |||
776 | /* check to see if we need to repopulate the free tx pkt queue */ | ||
777 | if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) { | ||
778 | while (skb_queue_len(&cm_node->cm_core->tx_free_list) < | ||
779 | cm_node->cm_core->free_tx_pkt_max) { | ||
780 | /* replace the frame we took, we won't get it back */ | ||
781 | new_skb = dev_alloc_skb(cm_node->cm_core->mtu); | ||
782 | BUG_ON(!new_skb); | ||
783 | /* add a replacement frame to the free tx list head */ | ||
784 | skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb); | ||
785 | } | ||
786 | } | ||
787 | |||
788 | skb = skb_dequeue(&cm_node->cm_core->tx_free_list); | ||
789 | |||
790 | return skb; | ||
791 | } | ||
792 | |||
793 | |||
794 | /** | ||
795 | * make_hashkey - generate hash key from node tuple | ||
796 | */ | ||
797 | static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port, | ||
798 | nes_addr_t rem_addr) | ||
799 | { | ||
800 | u32 hashkey = 0; | ||
801 | |||
802 | hashkey = loc_addr + rem_addr + loc_port + rem_port; | ||
803 | hashkey = (hashkey % NES_CM_HASHTABLE_SIZE); | ||
804 | |||
805 | return hashkey; | ||
806 | } | ||
807 | |||
808 | |||
809 | /** | ||
810 | * find_node - find a cm node that matches the reference cm node | 773 | * find_node - find a cm node that matches the reference cm node |
811 | */ | 774 | */ |
812 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | 775 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, |
813 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) | 776 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) |
814 | { | 777 | { |
815 | unsigned long flags; | 778 | unsigned long flags; |
816 | u32 hashkey; | ||
817 | struct list_head *hte; | 779 | struct list_head *hte; |
818 | struct nes_cm_node *cm_node; | 780 | struct nes_cm_node *cm_node; |
819 | 781 | ||
820 | /* make a hash index key for this packet */ | ||
821 | hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr); | ||
822 | |||
823 | /* get a handle on the hte */ | 782 | /* get a handle on the hte */ |
824 | hte = &cm_core->connected_nodes; | 783 | hte = &cm_core->connected_nodes; |
825 | 784 | ||
@@ -887,7 +846,6 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | |||
887 | static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) | 846 | static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) |
888 | { | 847 | { |
889 | unsigned long flags; | 848 | unsigned long flags; |
890 | u32 hashkey; | ||
891 | struct list_head *hte; | 849 | struct list_head *hte; |
892 | 850 | ||
893 | if (!cm_node || !cm_core) | 851 | if (!cm_node || !cm_core) |
@@ -896,11 +854,6 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node | |||
896 | nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", | 854 | nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", |
897 | cm_node); | 855 | cm_node); |
898 | 856 | ||
899 | /* first, make an index into our hash table */ | ||
900 | hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr, | ||
901 | cm_node->rem_port, cm_node->rem_addr); | ||
902 | cm_node->hashkey = hashkey; | ||
903 | |||
904 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 857 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
905 | 858 | ||
906 | /* get a handle on the hash table element (list head for this slot) */ | 859 | /* get a handle on the hash table element (list head for this slot) */ |
@@ -925,28 +878,36 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
925 | struct list_head *list_pos = NULL; | 878 | struct list_head *list_pos = NULL; |
926 | struct list_head *list_temp = NULL; | 879 | struct list_head *list_temp = NULL; |
927 | struct nes_cm_node *cm_node = NULL; | 880 | struct nes_cm_node *cm_node = NULL; |
881 | struct list_head reset_list; | ||
928 | 882 | ||
929 | nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, " | 883 | nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, " |
930 | "refcnt=%d\n", listener, free_hanging_nodes, | 884 | "refcnt=%d\n", listener, free_hanging_nodes, |
931 | atomic_read(&listener->ref_count)); | 885 | atomic_read(&listener->ref_count)); |
932 | /* free non-accelerated child nodes for this listener */ | 886 | /* free non-accelerated child nodes for this listener */ |
887 | INIT_LIST_HEAD(&reset_list); | ||
933 | if (free_hanging_nodes) { | 888 | if (free_hanging_nodes) { |
934 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 889 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
935 | list_for_each_safe(list_pos, list_temp, | 890 | list_for_each_safe(list_pos, list_temp, |
936 | &g_cm_core->connected_nodes) { | 891 | &g_cm_core->connected_nodes) { |
937 | cm_node = container_of(list_pos, struct nes_cm_node, | 892 | cm_node = container_of(list_pos, struct nes_cm_node, |
938 | list); | 893 | list); |
939 | if ((cm_node->listener == listener) && | 894 | if ((cm_node->listener == listener) && |
940 | (!cm_node->accelerated)) { | 895 | (!cm_node->accelerated)) { |
941 | cleanup_retrans_entry(cm_node); | 896 | add_ref_cm_node(cm_node); |
942 | spin_unlock_irqrestore(&cm_core->ht_lock, | 897 | list_add(&cm_node->reset_entry, &reset_list); |
943 | flags); | ||
944 | send_reset(cm_node, NULL); | ||
945 | spin_lock_irqsave(&cm_core->ht_lock, flags); | ||
946 | } | 898 | } |
947 | } | 899 | } |
948 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | 900 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); |
949 | } | 901 | } |
902 | |||
903 | list_for_each_safe(list_pos, list_temp, &reset_list) { | ||
904 | cm_node = container_of(list_pos, struct nes_cm_node, | ||
905 | reset_entry); | ||
906 | cleanup_retrans_entry(cm_node); | ||
907 | send_reset(cm_node, NULL); | ||
908 | rem_ref_cm_node(cm_node->cm_core, cm_node); | ||
909 | } | ||
910 | |||
950 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | 911 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); |
951 | if (!atomic_dec_return(&listener->ref_count)) { | 912 | if (!atomic_dec_return(&listener->ref_count)) { |
952 | list_del(&listener->list); | 913 | list_del(&listener->list); |
@@ -1123,7 +1084,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1123 | 1084 | ||
1124 | cm_node->loopbackpartner = NULL; | 1085 | cm_node->loopbackpartner = NULL; |
1125 | /* get the mac addr for the remote node */ | 1086 | /* get the mac addr for the remote node */ |
1126 | arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); | 1087 | if (ipv4_is_loopback(htonl(cm_node->rem_addr))) |
1088 | arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); | ||
1089 | else | ||
1090 | arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); | ||
1127 | if (arpindex < 0) { | 1091 | if (arpindex < 0) { |
1128 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr); | 1092 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr); |
1129 | if (arpindex < 0) { | 1093 | if (arpindex < 0) { |
@@ -1303,7 +1267,6 @@ static void drop_packet(struct sk_buff *skb) | |||
1303 | static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1267 | static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1304 | struct tcphdr *tcph) | 1268 | struct tcphdr *tcph) |
1305 | { | 1269 | { |
1306 | atomic_inc(&cm_resets_recvd); | ||
1307 | nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " | 1270 | nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " |
1308 | "refcnt=%d\n", cm_node, cm_node->state, | 1271 | "refcnt=%d\n", cm_node, cm_node->state, |
1309 | atomic_read(&cm_node->ref_count)); | 1272 | atomic_read(&cm_node->ref_count)); |
@@ -1341,6 +1304,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1341 | { | 1304 | { |
1342 | 1305 | ||
1343 | int reset = 0; /* whether to send reset in case of err.. */ | 1306 | int reset = 0; /* whether to send reset in case of err.. */ |
1307 | int passive_state; | ||
1344 | atomic_inc(&cm_resets_recvd); | 1308 | atomic_inc(&cm_resets_recvd); |
1345 | nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." | 1309 | nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." |
1346 | " refcnt=%d\n", cm_node, cm_node->state, | 1310 | " refcnt=%d\n", cm_node, cm_node->state, |
@@ -1354,7 +1318,14 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1354 | cm_node->listener, cm_node->state); | 1318 | cm_node->listener, cm_node->state); |
1355 | active_open_err(cm_node, skb, reset); | 1319 | active_open_err(cm_node, skb, reset); |
1356 | break; | 1320 | break; |
1357 | /* For PASSIVE open states, remove the cm_node event */ | 1321 | case NES_CM_STATE_MPAREQ_RCVD: |
1322 | passive_state = atomic_add_return(1, &cm_node->passive_state); | ||
1323 | if (passive_state == NES_SEND_RESET_EVENT) | ||
1324 | create_event(cm_node, NES_CM_EVENT_RESET); | ||
1325 | cleanup_retrans_entry(cm_node); | ||
1326 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1327 | dev_kfree_skb_any(skb); | ||
1328 | break; | ||
1358 | case NES_CM_STATE_ESTABLISHED: | 1329 | case NES_CM_STATE_ESTABLISHED: |
1359 | case NES_CM_STATE_SYN_RCVD: | 1330 | case NES_CM_STATE_SYN_RCVD: |
1360 | case NES_CM_STATE_LISTENING: | 1331 | case NES_CM_STATE_LISTENING: |
@@ -1362,7 +1333,14 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1362 | passive_open_err(cm_node, skb, reset); | 1333 | passive_open_err(cm_node, skb, reset); |
1363 | break; | 1334 | break; |
1364 | case NES_CM_STATE_TSA: | 1335 | case NES_CM_STATE_TSA: |
1336 | active_open_err(cm_node, skb, reset); | ||
1337 | break; | ||
1338 | case NES_CM_STATE_CLOSED: | ||
1339 | cleanup_retrans_entry(cm_node); | ||
1340 | drop_packet(skb); | ||
1341 | break; | ||
1365 | default: | 1342 | default: |
1343 | drop_packet(skb); | ||
1366 | break; | 1344 | break; |
1367 | } | 1345 | } |
1368 | } | 1346 | } |
@@ -1391,6 +1369,9 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1391 | dev_kfree_skb_any(skb); | 1369 | dev_kfree_skb_any(skb); |
1392 | if (type == NES_CM_EVENT_CONNECTED) | 1370 | if (type == NES_CM_EVENT_CONNECTED) |
1393 | cm_node->state = NES_CM_STATE_TSA; | 1371 | cm_node->state = NES_CM_STATE_TSA; |
1372 | else | ||
1373 | atomic_set(&cm_node->passive_state, | ||
1374 | NES_PASSIVE_STATE_INDICATED); | ||
1394 | create_event(cm_node, type); | 1375 | create_event(cm_node, type); |
1395 | 1376 | ||
1396 | } | 1377 | } |
@@ -1471,7 +1452,7 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1471 | int optionsize; | 1452 | int optionsize; |
1472 | 1453 | ||
1473 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | 1454 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); |
1474 | skb_pull(skb, tcph->doff << 2); | 1455 | skb_trim(skb, 0); |
1475 | inc_sequence = ntohl(tcph->seq); | 1456 | inc_sequence = ntohl(tcph->seq); |
1476 | 1457 | ||
1477 | switch (cm_node->state) { | 1458 | switch (cm_node->state) { |
@@ -1504,6 +1485,10 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1504 | cm_node->state = NES_CM_STATE_SYN_RCVD; | 1485 | cm_node->state = NES_CM_STATE_SYN_RCVD; |
1505 | send_syn(cm_node, 1, skb); | 1486 | send_syn(cm_node, 1, skb); |
1506 | break; | 1487 | break; |
1488 | case NES_CM_STATE_CLOSED: | ||
1489 | cleanup_retrans_entry(cm_node); | ||
1490 | send_reset(cm_node, skb); | ||
1491 | break; | ||
1507 | case NES_CM_STATE_TSA: | 1492 | case NES_CM_STATE_TSA: |
1508 | case NES_CM_STATE_ESTABLISHED: | 1493 | case NES_CM_STATE_ESTABLISHED: |
1509 | case NES_CM_STATE_FIN_WAIT1: | 1494 | case NES_CM_STATE_FIN_WAIT1: |
@@ -1512,7 +1497,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1512 | case NES_CM_STATE_LAST_ACK: | 1497 | case NES_CM_STATE_LAST_ACK: |
1513 | case NES_CM_STATE_CLOSING: | 1498 | case NES_CM_STATE_CLOSING: |
1514 | case NES_CM_STATE_UNKNOWN: | 1499 | case NES_CM_STATE_UNKNOWN: |
1515 | case NES_CM_STATE_CLOSED: | ||
1516 | default: | 1500 | default: |
1517 | drop_packet(skb); | 1501 | drop_packet(skb); |
1518 | break; | 1502 | break; |
@@ -1528,7 +1512,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1528 | int optionsize; | 1512 | int optionsize; |
1529 | 1513 | ||
1530 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | 1514 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); |
1531 | skb_pull(skb, tcph->doff << 2); | 1515 | skb_trim(skb, 0); |
1532 | inc_sequence = ntohl(tcph->seq); | 1516 | inc_sequence = ntohl(tcph->seq); |
1533 | switch (cm_node->state) { | 1517 | switch (cm_node->state) { |
1534 | case NES_CM_STATE_SYN_SENT: | 1518 | case NES_CM_STATE_SYN_SENT: |
@@ -1552,6 +1536,12 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1552 | /* passive open, so should not be here */ | 1536 | /* passive open, so should not be here */ |
1553 | passive_open_err(cm_node, skb, 1); | 1537 | passive_open_err(cm_node, skb, 1); |
1554 | break; | 1538 | break; |
1539 | case NES_CM_STATE_LISTENING: | ||
1540 | case NES_CM_STATE_CLOSED: | ||
1541 | cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); | ||
1542 | cleanup_retrans_entry(cm_node); | ||
1543 | send_reset(cm_node, skb); | ||
1544 | break; | ||
1555 | case NES_CM_STATE_ESTABLISHED: | 1545 | case NES_CM_STATE_ESTABLISHED: |
1556 | case NES_CM_STATE_FIN_WAIT1: | 1546 | case NES_CM_STATE_FIN_WAIT1: |
1557 | case NES_CM_STATE_FIN_WAIT2: | 1547 | case NES_CM_STATE_FIN_WAIT2: |
@@ -1559,7 +1549,6 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1559 | case NES_CM_STATE_TSA: | 1549 | case NES_CM_STATE_TSA: |
1560 | case NES_CM_STATE_CLOSING: | 1550 | case NES_CM_STATE_CLOSING: |
1561 | case NES_CM_STATE_UNKNOWN: | 1551 | case NES_CM_STATE_UNKNOWN: |
1562 | case NES_CM_STATE_CLOSED: | ||
1563 | case NES_CM_STATE_MPAREQ_SENT: | 1552 | case NES_CM_STATE_MPAREQ_SENT: |
1564 | default: | 1553 | default: |
1565 | drop_packet(skb); | 1554 | drop_packet(skb); |
@@ -1574,6 +1563,13 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1574 | u32 inc_sequence; | 1563 | u32 inc_sequence; |
1575 | u32 rem_seq_ack; | 1564 | u32 rem_seq_ack; |
1576 | u32 rem_seq; | 1565 | u32 rem_seq; |
1566 | int ret; | ||
1567 | int optionsize; | ||
1568 | u32 temp_seq = cm_node->tcp_cntxt.loc_seq_num; | ||
1569 | |||
1570 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | ||
1571 | cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); | ||
1572 | |||
1577 | if (check_seq(cm_node, tcph, skb)) | 1573 | if (check_seq(cm_node, tcph, skb)) |
1578 | return; | 1574 | return; |
1579 | 1575 | ||
@@ -1586,7 +1582,18 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1586 | switch (cm_node->state) { | 1582 | switch (cm_node->state) { |
1587 | case NES_CM_STATE_SYN_RCVD: | 1583 | case NES_CM_STATE_SYN_RCVD: |
1588 | /* Passive OPEN */ | 1584 | /* Passive OPEN */ |
1585 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 1); | ||
1586 | if (ret) | ||
1587 | break; | ||
1589 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); | 1588 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); |
1589 | cm_node->tcp_cntxt.loc_seq_num = temp_seq; | ||
1590 | if (cm_node->tcp_cntxt.rem_ack_num != | ||
1591 | cm_node->tcp_cntxt.loc_seq_num) { | ||
1592 | nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n"); | ||
1593 | cleanup_retrans_entry(cm_node); | ||
1594 | send_reset(cm_node, skb); | ||
1595 | return; | ||
1596 | } | ||
1590 | cm_node->state = NES_CM_STATE_ESTABLISHED; | 1597 | cm_node->state = NES_CM_STATE_ESTABLISHED; |
1591 | if (datasize) { | 1598 | if (datasize) { |
1592 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 1599 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
@@ -1618,11 +1625,15 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1618 | dev_kfree_skb_any(skb); | 1625 | dev_kfree_skb_any(skb); |
1619 | } | 1626 | } |
1620 | break; | 1627 | break; |
1628 | case NES_CM_STATE_LISTENING: | ||
1629 | case NES_CM_STATE_CLOSED: | ||
1630 | cleanup_retrans_entry(cm_node); | ||
1631 | send_reset(cm_node, skb); | ||
1632 | break; | ||
1621 | case NES_CM_STATE_FIN_WAIT1: | 1633 | case NES_CM_STATE_FIN_WAIT1: |
1622 | case NES_CM_STATE_SYN_SENT: | 1634 | case NES_CM_STATE_SYN_SENT: |
1623 | case NES_CM_STATE_FIN_WAIT2: | 1635 | case NES_CM_STATE_FIN_WAIT2: |
1624 | case NES_CM_STATE_TSA: | 1636 | case NES_CM_STATE_TSA: |
1625 | case NES_CM_STATE_CLOSED: | ||
1626 | case NES_CM_STATE_MPAREQ_RCVD: | 1637 | case NES_CM_STATE_MPAREQ_RCVD: |
1627 | case NES_CM_STATE_LAST_ACK: | 1638 | case NES_CM_STATE_LAST_ACK: |
1628 | case NES_CM_STATE_CLOSING: | 1639 | case NES_CM_STATE_CLOSING: |
@@ -1645,9 +1656,9 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1645 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", | 1656 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", |
1646 | __func__, cm_node); | 1657 | __func__, cm_node); |
1647 | if (passive) | 1658 | if (passive) |
1648 | passive_open_err(cm_node, skb, 0); | 1659 | passive_open_err(cm_node, skb, 1); |
1649 | else | 1660 | else |
1650 | active_open_err(cm_node, skb, 0); | 1661 | active_open_err(cm_node, skb, 1); |
1651 | return 1; | 1662 | return 1; |
1652 | } | 1663 | } |
1653 | } | 1664 | } |
@@ -1967,6 +1978,7 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, | |||
1967 | struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) | 1978 | struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) |
1968 | { | 1979 | { |
1969 | int ret = 0; | 1980 | int ret = 0; |
1981 | int passive_state; | ||
1970 | 1982 | ||
1971 | nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", | 1983 | nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", |
1972 | __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); | 1984 | __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); |
@@ -1974,9 +1986,13 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, | |||
1974 | if (cm_node->tcp_cntxt.client) | 1986 | if (cm_node->tcp_cntxt.client) |
1975 | return ret; | 1987 | return ret; |
1976 | cleanup_retrans_entry(cm_node); | 1988 | cleanup_retrans_entry(cm_node); |
1977 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1978 | 1989 | ||
1979 | ret = send_reset(cm_node, NULL); | 1990 | passive_state = atomic_add_return(1, &cm_node->passive_state); |
1991 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1992 | if (passive_state == NES_SEND_RESET_EVENT) | ||
1993 | rem_ref_cm_node(cm_core, cm_node); | ||
1994 | else | ||
1995 | ret = send_reset(cm_node, NULL); | ||
1980 | return ret; | 1996 | return ret; |
1981 | } | 1997 | } |
1982 | 1998 | ||
@@ -2034,7 +2050,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod | |||
2034 | * recv_pkt - recv an ETHERNET packet, and process it through CM | 2050 | * recv_pkt - recv an ETHERNET packet, and process it through CM |
2035 | * node state machine | 2051 | * node state machine |
2036 | */ | 2052 | */ |
2037 | static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, | 2053 | static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, |
2038 | struct nes_vnic *nesvnic, struct sk_buff *skb) | 2054 | struct nes_vnic *nesvnic, struct sk_buff *skb) |
2039 | { | 2055 | { |
2040 | struct nes_cm_node *cm_node = NULL; | 2056 | struct nes_cm_node *cm_node = NULL; |
@@ -2042,23 +2058,16 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2042 | struct iphdr *iph; | 2058 | struct iphdr *iph; |
2043 | struct tcphdr *tcph; | 2059 | struct tcphdr *tcph; |
2044 | struct nes_cm_info nfo; | 2060 | struct nes_cm_info nfo; |
2061 | int skb_handled = 1; | ||
2045 | 2062 | ||
2046 | if (!skb) | 2063 | if (!skb) |
2047 | return; | 2064 | return 0; |
2048 | if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { | 2065 | if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { |
2049 | dev_kfree_skb_any(skb); | 2066 | return 0; |
2050 | return; | ||
2051 | } | 2067 | } |
2052 | 2068 | ||
2053 | iph = (struct iphdr *)skb->data; | 2069 | iph = (struct iphdr *)skb->data; |
2054 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); | 2070 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); |
2055 | skb_reset_network_header(skb); | ||
2056 | skb_set_transport_header(skb, sizeof(*tcph)); | ||
2057 | if (!tcph) { | ||
2058 | dev_kfree_skb_any(skb); | ||
2059 | return; | ||
2060 | } | ||
2061 | skb->len = ntohs(iph->tot_len); | ||
2062 | 2071 | ||
2063 | nfo.loc_addr = ntohl(iph->daddr); | 2072 | nfo.loc_addr = ntohl(iph->daddr); |
2064 | nfo.loc_port = ntohs(tcph->dest); | 2073 | nfo.loc_port = ntohs(tcph->dest); |
@@ -2077,23 +2086,21 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2077 | /* Only type of packet accepted are for */ | 2086 | /* Only type of packet accepted are for */ |
2078 | /* the PASSIVE open (syn only) */ | 2087 | /* the PASSIVE open (syn only) */ |
2079 | if ((!tcph->syn) || (tcph->ack)) { | 2088 | if ((!tcph->syn) || (tcph->ack)) { |
2080 | cm_packets_dropped++; | 2089 | skb_handled = 0; |
2081 | break; | 2090 | break; |
2082 | } | 2091 | } |
2083 | listener = find_listener(cm_core, nfo.loc_addr, | 2092 | listener = find_listener(cm_core, nfo.loc_addr, |
2084 | nfo.loc_port, | 2093 | nfo.loc_port, |
2085 | NES_CM_LISTENER_ACTIVE_STATE); | 2094 | NES_CM_LISTENER_ACTIVE_STATE); |
2086 | if (listener) { | 2095 | if (!listener) { |
2087 | nfo.cm_id = listener->cm_id; | 2096 | nfo.cm_id = NULL; |
2088 | nfo.conn_type = listener->conn_type; | 2097 | nfo.conn_type = 0; |
2089 | } else { | 2098 | nes_debug(NES_DBG_CM, "Unable to find listener for the pkt\n"); |
2090 | nes_debug(NES_DBG_CM, "Unable to find listener " | 2099 | skb_handled = 0; |
2091 | "for the pkt\n"); | ||
2092 | cm_packets_dropped++; | ||
2093 | dev_kfree_skb_any(skb); | ||
2094 | break; | 2100 | break; |
2095 | } | 2101 | } |
2096 | 2102 | nfo.cm_id = listener->cm_id; | |
2103 | nfo.conn_type = listener->conn_type; | ||
2097 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, | 2104 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, |
2098 | listener); | 2105 | listener); |
2099 | if (!cm_node) { | 2106 | if (!cm_node) { |
@@ -2119,9 +2126,13 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2119 | dev_kfree_skb_any(skb); | 2126 | dev_kfree_skb_any(skb); |
2120 | break; | 2127 | break; |
2121 | } | 2128 | } |
2129 | skb_reset_network_header(skb); | ||
2130 | skb_set_transport_header(skb, sizeof(*tcph)); | ||
2131 | skb->len = ntohs(iph->tot_len); | ||
2122 | process_packet(cm_node, skb, cm_core); | 2132 | process_packet(cm_node, skb, cm_core); |
2123 | rem_ref_cm_node(cm_core, cm_node); | 2133 | rem_ref_cm_node(cm_core, cm_node); |
2124 | } while (0); | 2134 | } while (0); |
2135 | return skb_handled; | ||
2125 | } | 2136 | } |
2126 | 2137 | ||
2127 | 2138 | ||
@@ -2130,10 +2141,7 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2130 | */ | 2141 | */ |
2131 | static struct nes_cm_core *nes_cm_alloc_core(void) | 2142 | static struct nes_cm_core *nes_cm_alloc_core(void) |
2132 | { | 2143 | { |
2133 | int i; | ||
2134 | |||
2135 | struct nes_cm_core *cm_core; | 2144 | struct nes_cm_core *cm_core; |
2136 | struct sk_buff *skb = NULL; | ||
2137 | 2145 | ||
2138 | /* setup the CM core */ | 2146 | /* setup the CM core */ |
2139 | /* alloc top level core control structure */ | 2147 | /* alloc top level core control structure */ |
@@ -2151,19 +2159,6 @@ static struct nes_cm_core *nes_cm_alloc_core(void) | |||
2151 | 2159 | ||
2152 | atomic_set(&cm_core->events_posted, 0); | 2160 | atomic_set(&cm_core->events_posted, 0); |
2153 | 2161 | ||
2154 | /* init the packet lists */ | ||
2155 | skb_queue_head_init(&cm_core->tx_free_list); | ||
2156 | |||
2157 | for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) { | ||
2158 | skb = dev_alloc_skb(cm_core->mtu); | ||
2159 | if (!skb) { | ||
2160 | kfree(cm_core); | ||
2161 | return NULL; | ||
2162 | } | ||
2163 | /* add 'raw' skb to free frame list */ | ||
2164 | skb_queue_head(&cm_core->tx_free_list, skb); | ||
2165 | } | ||
2166 | |||
2167 | cm_core->api = &nes_cm_api; | 2162 | cm_core->api = &nes_cm_api; |
2168 | 2163 | ||
2169 | spin_lock_init(&cm_core->ht_lock); | 2164 | spin_lock_init(&cm_core->ht_lock); |
@@ -2392,7 +2387,6 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2392 | atomic_inc(&cm_disconnects); | 2387 | atomic_inc(&cm_disconnects); |
2393 | cm_event.event = IW_CM_EVENT_DISCONNECT; | 2388 | cm_event.event = IW_CM_EVENT_DISCONNECT; |
2394 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { | 2389 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { |
2395 | issued_disconnect_reset = 1; | ||
2396 | cm_event.status = IW_CM_EVENT_STATUS_RESET; | 2390 | cm_event.status = IW_CM_EVENT_STATUS_RESET; |
2397 | nes_debug(NES_DBG_CM, "Generating a CM " | 2391 | nes_debug(NES_DBG_CM, "Generating a CM " |
2398 | "Disconnect Event (status reset) for " | 2392 | "Disconnect Event (status reset) for " |
@@ -2542,6 +2536,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2542 | struct nes_v4_quad nes_quad; | 2536 | struct nes_v4_quad nes_quad; |
2543 | u32 crc_value; | 2537 | u32 crc_value; |
2544 | int ret; | 2538 | int ret; |
2539 | int passive_state; | ||
2545 | 2540 | ||
2546 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 2541 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
2547 | if (!ibqp) | 2542 | if (!ibqp) |
@@ -2709,8 +2704,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2709 | conn_param->private_data_len + | 2704 | conn_param->private_data_len + |
2710 | sizeof(struct ietf_mpa_frame)); | 2705 | sizeof(struct ietf_mpa_frame)); |
2711 | 2706 | ||
2712 | attr.qp_state = IB_QPS_RTS; | ||
2713 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | ||
2714 | 2707 | ||
2715 | /* notify OF layer that accept event was successfull */ | 2708 | /* notify OF layer that accept event was successfull */ |
2716 | cm_id->add_ref(cm_id); | 2709 | cm_id->add_ref(cm_id); |
@@ -2723,6 +2716,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2723 | cm_event.private_data = NULL; | 2716 | cm_event.private_data = NULL; |
2724 | cm_event.private_data_len = 0; | 2717 | cm_event.private_data_len = 0; |
2725 | ret = cm_id->event_handler(cm_id, &cm_event); | 2718 | ret = cm_id->event_handler(cm_id, &cm_event); |
2719 | attr.qp_state = IB_QPS_RTS; | ||
2720 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | ||
2726 | if (cm_node->loopbackpartner) { | 2721 | if (cm_node->loopbackpartner) { |
2727 | cm_node->loopbackpartner->mpa_frame_size = | 2722 | cm_node->loopbackpartner->mpa_frame_size = |
2728 | nesqp->private_data_len; | 2723 | nesqp->private_data_len; |
@@ -2735,6 +2730,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2735 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 2730 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
2736 | "ret=%d\n", __func__, __LINE__, ret); | 2731 | "ret=%d\n", __func__, __LINE__, ret); |
2737 | 2732 | ||
2733 | passive_state = atomic_add_return(1, &cm_node->passive_state); | ||
2734 | if (passive_state == NES_SEND_RESET_EVENT) | ||
2735 | create_event(cm_node, NES_CM_EVENT_RESET); | ||
2738 | return 0; | 2736 | return 0; |
2739 | } | 2737 | } |
2740 | 2738 | ||
@@ -2938,15 +2936,16 @@ int nes_destroy_listen(struct iw_cm_id *cm_id) | |||
2938 | */ | 2936 | */ |
2939 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) | 2937 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) |
2940 | { | 2938 | { |
2939 | int rc = 0; | ||
2941 | cm_packets_received++; | 2940 | cm_packets_received++; |
2942 | if ((g_cm_core) && (g_cm_core->api)) { | 2941 | if ((g_cm_core) && (g_cm_core->api)) { |
2943 | g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); | 2942 | rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); |
2944 | } else { | 2943 | } else { |
2945 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," | 2944 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," |
2946 | " cm is not setup properly.\n"); | 2945 | " cm is not setup properly.\n"); |
2947 | } | 2946 | } |
2948 | 2947 | ||
2949 | return 0; | 2948 | return rc; |
2950 | } | 2949 | } |
2951 | 2950 | ||
2952 | 2951 | ||
@@ -3217,6 +3216,18 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3217 | cm_event.private_data_len = 0; | 3216 | cm_event.private_data_len = 0; |
3218 | 3217 | ||
3219 | ret = cm_id->event_handler(cm_id, &cm_event); | 3218 | ret = cm_id->event_handler(cm_id, &cm_event); |
3219 | cm_id->add_ref(cm_id); | ||
3220 | atomic_inc(&cm_closes); | ||
3221 | cm_event.event = IW_CM_EVENT_CLOSE; | ||
3222 | cm_event.status = IW_CM_EVENT_STATUS_OK; | ||
3223 | cm_event.provider_data = cm_id->provider_data; | ||
3224 | cm_event.local_addr = cm_id->local_addr; | ||
3225 | cm_event.remote_addr = cm_id->remote_addr; | ||
3226 | cm_event.private_data = NULL; | ||
3227 | cm_event.private_data_len = 0; | ||
3228 | nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node); | ||
3229 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
3230 | |||
3220 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | 3231 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); |
3221 | 3232 | ||
3222 | 3233 | ||
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 367b3d290140..fafa35042ebd 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -76,6 +76,10 @@ enum nes_timer_type { | |||
76 | NES_TIMER_TYPE_CLOSE, | 76 | NES_TIMER_TYPE_CLOSE, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | #define NES_PASSIVE_STATE_INDICATED 0 | ||
80 | #define NES_DO_NOT_SEND_RESET_EVENT 1 | ||
81 | #define NES_SEND_RESET_EVENT 2 | ||
82 | |||
79 | #define MAX_NES_IFS 4 | 83 | #define MAX_NES_IFS 4 |
80 | 84 | ||
81 | #define SET_ACK 1 | 85 | #define SET_ACK 1 |
@@ -161,6 +165,8 @@ struct nes_timer_entry { | |||
161 | 165 | ||
162 | #define NES_CM_DEF_SEQ2 0x18ed5740 | 166 | #define NES_CM_DEF_SEQ2 0x18ed5740 |
163 | #define NES_CM_DEF_LOCAL_ID2 0xb807 | 167 | #define NES_CM_DEF_LOCAL_ID2 0xb807 |
168 | #define MAX_CM_BUFFER 512 | ||
169 | |||
164 | 170 | ||
165 | typedef u32 nes_addr_t; | 171 | typedef u32 nes_addr_t; |
166 | 172 | ||
@@ -254,8 +260,6 @@ struct nes_cm_listener { | |||
254 | 260 | ||
255 | /* per connection node and node state information */ | 261 | /* per connection node and node state information */ |
256 | struct nes_cm_node { | 262 | struct nes_cm_node { |
257 | u32 hashkey; | ||
258 | |||
259 | nes_addr_t loc_addr, rem_addr; | 263 | nes_addr_t loc_addr, rem_addr; |
260 | u16 loc_port, rem_port; | 264 | u16 loc_port, rem_port; |
261 | 265 | ||
@@ -292,7 +296,10 @@ struct nes_cm_node { | |||
292 | int apbvt_set; | 296 | int apbvt_set; |
293 | int accept_pend; | 297 | int accept_pend; |
294 | int freed; | 298 | int freed; |
299 | struct list_head timer_entry; | ||
300 | struct list_head reset_entry; | ||
295 | struct nes_qp *nesqp; | 301 | struct nes_qp *nesqp; |
302 | atomic_t passive_state; | ||
296 | }; | 303 | }; |
297 | 304 | ||
298 | /* structure for client or CM to fill when making CM api calls. */ | 305 | /* structure for client or CM to fill when making CM api calls. */ |
@@ -350,7 +357,6 @@ struct nes_cm_core { | |||
350 | u32 mtu; | 357 | u32 mtu; |
351 | u32 free_tx_pkt_max; | 358 | u32 free_tx_pkt_max; |
352 | u32 rx_pkt_posted; | 359 | u32 rx_pkt_posted; |
353 | struct sk_buff_head tx_free_list; | ||
354 | atomic_t ht_node_cnt; | 360 | atomic_t ht_node_cnt; |
355 | struct list_head connected_nodes; | 361 | struct list_head connected_nodes; |
356 | /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */ | 362 | /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */ |
@@ -390,7 +396,7 @@ struct nes_cm_ops { | |||
390 | struct nes_cm_node *); | 396 | struct nes_cm_node *); |
391 | int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, | 397 | int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, |
392 | struct nes_cm_node *); | 398 | struct nes_cm_node *); |
393 | void (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, | 399 | int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, |
394 | struct sk_buff *); | 400 | struct sk_buff *); |
395 | int (*destroy_cm_core)(struct nes_cm_core *); | 401 | int (*destroy_cm_core)(struct nes_cm_core *); |
396 | int (*get)(struct nes_cm_core *); | 402 | int (*get)(struct nes_cm_core *); |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 735c125b48af..5d139db1b771 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -2700,27 +2700,33 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | |||
2700 | pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */ | 2700 | pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */ |
2701 | 2701 | ||
2702 | if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) { | 2702 | if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) { |
2703 | nes_cm_recv(rx_skb, nesvnic->netdev); | 2703 | if (nes_cm_recv(rx_skb, nesvnic->netdev)) |
2704 | rx_skb = NULL; | ||
2705 | } | ||
2706 | if (rx_skb == NULL) | ||
2707 | goto skip_rx_indicate0; | ||
2708 | |||
2709 | |||
2710 | if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && | ||
2711 | (nesvnic->vlan_grp != NULL)) { | ||
2712 | vlan_tag = (u16)(le32_to_cpu( | ||
2713 | cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]) | ||
2714 | >> 16); | ||
2715 | nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", | ||
2716 | nesvnic->netdev->name, vlan_tag); | ||
2717 | if (nes_use_lro) | ||
2718 | lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb, | ||
2719 | nesvnic->vlan_grp, vlan_tag, NULL); | ||
2720 | else | ||
2721 | nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag); | ||
2704 | } else { | 2722 | } else { |
2705 | if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && (nesvnic->vlan_grp != NULL)) { | 2723 | if (nes_use_lro) |
2706 | vlan_tag = (u16)(le32_to_cpu( | 2724 | lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); |
2707 | cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]) | 2725 | else |
2708 | >> 16); | 2726 | nes_netif_rx(rx_skb); |
2709 | nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", | ||
2710 | nesvnic->netdev->name, vlan_tag); | ||
2711 | if (nes_use_lro) | ||
2712 | lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb, | ||
2713 | nesvnic->vlan_grp, vlan_tag, NULL); | ||
2714 | else | ||
2715 | nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag); | ||
2716 | } else { | ||
2717 | if (nes_use_lro) | ||
2718 | lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); | ||
2719 | else | ||
2720 | nes_netif_rx(rx_skb); | ||
2721 | } | ||
2722 | } | 2727 | } |
2723 | 2728 | ||
2729 | skip_rx_indicate0: | ||
2724 | nesvnic->netdev->last_rx = jiffies; | 2730 | nesvnic->netdev->last_rx = jiffies; |
2725 | /* nesvnic->netstats.rx_packets++; */ | 2731 | /* nesvnic->netstats.rx_packets++; */ |
2726 | /* nesvnic->netstats.rx_bytes += rx_pkt_size; */ | 2732 | /* nesvnic->netstats.rx_bytes += rx_pkt_size; */ |
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index 9f16f7a89efc..aa9b7348c728 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
@@ -540,11 +540,14 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev) | |||
540 | 540 | ||
541 | if (!list_empty(&nesdev->cqp_avail_reqs)) { | 541 | if (!list_empty(&nesdev->cqp_avail_reqs)) { |
542 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | 542 | spin_lock_irqsave(&nesdev->cqp.lock, flags); |
543 | cqp_request = list_entry(nesdev->cqp_avail_reqs.next, | 543 | if (!list_empty(&nesdev->cqp_avail_reqs)) { |
544 | cqp_request = list_entry(nesdev->cqp_avail_reqs.next, | ||
544 | struct nes_cqp_request, list); | 545 | struct nes_cqp_request, list); |
545 | list_del_init(&cqp_request->list); | 546 | list_del_init(&cqp_request->list); |
547 | } | ||
546 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | 548 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); |
547 | } else { | 549 | } |
550 | if (cqp_request == NULL) { | ||
548 | cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); | 551 | cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); |
549 | if (cqp_request) { | 552 | if (cqp_request) { |
550 | cqp_request->dynamic = 1; | 553 | cqp_request->dynamic = 1; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index d36c9a0bf1bb..4fdb72454f94 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -1695,13 +1695,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1695 | /* use 4k pbl */ | 1695 | /* use 4k pbl */ |
1696 | nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries); | 1696 | nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries); |
1697 | if (nesadapter->free_4kpbl == 0) { | 1697 | if (nesadapter->free_4kpbl == 0) { |
1698 | if (cqp_request->dynamic) { | 1698 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); |
1699 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | 1699 | nes_free_cqp_request(nesdev, cqp_request); |
1700 | kfree(cqp_request); | ||
1701 | } else { | ||
1702 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1703 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1704 | } | ||
1705 | if (!context) | 1700 | if (!context) |
1706 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | 1701 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, |
1707 | nescq->hw_cq.cq_pbase); | 1702 | nescq->hw_cq.cq_pbase); |
@@ -1717,13 +1712,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1717 | /* use 256 byte pbl */ | 1712 | /* use 256 byte pbl */ |
1718 | nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries); | 1713 | nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries); |
1719 | if (nesadapter->free_256pbl == 0) { | 1714 | if (nesadapter->free_256pbl == 0) { |
1720 | if (cqp_request->dynamic) { | 1715 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); |
1721 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | 1716 | nes_free_cqp_request(nesdev, cqp_request); |
1722 | kfree(cqp_request); | ||
1723 | } else { | ||
1724 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1725 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1726 | } | ||
1727 | if (!context) | 1717 | if (!context) |
1728 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | 1718 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, |
1729 | nescq->hw_cq.cq_pbase); | 1719 | nescq->hw_cq.cq_pbase); |
@@ -1928,13 +1918,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, | |||
1928 | /* Two level PBL */ | 1918 | /* Two level PBL */ |
1929 | if ((pbl_count+1) > nesadapter->free_4kpbl) { | 1919 | if ((pbl_count+1) > nesadapter->free_4kpbl) { |
1930 | nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n"); | 1920 | nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n"); |
1931 | if (cqp_request->dynamic) { | 1921 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); |
1932 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | 1922 | nes_free_cqp_request(nesdev, cqp_request); |
1933 | kfree(cqp_request); | ||
1934 | } else { | ||
1935 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1936 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1937 | } | ||
1938 | return -ENOMEM; | 1923 | return -ENOMEM; |
1939 | } else { | 1924 | } else { |
1940 | nesadapter->free_4kpbl -= pbl_count+1; | 1925 | nesadapter->free_4kpbl -= pbl_count+1; |
@@ -1942,13 +1927,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, | |||
1942 | } else if (residual_page_count > 32) { | 1927 | } else if (residual_page_count > 32) { |
1943 | if (pbl_count > nesadapter->free_4kpbl) { | 1928 | if (pbl_count > nesadapter->free_4kpbl) { |
1944 | nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n"); | 1929 | nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n"); |
1945 | if (cqp_request->dynamic) { | 1930 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); |
1946 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | 1931 | nes_free_cqp_request(nesdev, cqp_request); |
1947 | kfree(cqp_request); | ||
1948 | } else { | ||
1949 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1950 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1951 | } | ||
1952 | return -ENOMEM; | 1932 | return -ENOMEM; |
1953 | } else { | 1933 | } else { |
1954 | nesadapter->free_4kpbl -= pbl_count; | 1934 | nesadapter->free_4kpbl -= pbl_count; |
@@ -1956,13 +1936,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, | |||
1956 | } else { | 1936 | } else { |
1957 | if (pbl_count > nesadapter->free_256pbl) { | 1937 | if (pbl_count > nesadapter->free_256pbl) { |
1958 | nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n"); | 1938 | nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n"); |
1959 | if (cqp_request->dynamic) { | 1939 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); |
1960 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | 1940 | nes_free_cqp_request(nesdev, cqp_request); |
1961 | kfree(cqp_request); | ||
1962 | } else { | ||
1963 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1964 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1965 | } | ||
1966 | return -ENOMEM; | 1941 | return -ENOMEM; |
1967 | } else { | 1942 | } else { |
1968 | nesadapter->free_256pbl -= pbl_count; | 1943 | nesadapter->free_256pbl -= pbl_count; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 81a82628a5f1..861119593f2b 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -252,6 +252,9 @@ struct iser_conn { | |||
252 | wait_queue_head_t wait; /* waitq for conn/disconn */ | 252 | wait_queue_head_t wait; /* waitq for conn/disconn */ |
253 | atomic_t post_recv_buf_count; /* posted rx count */ | 253 | atomic_t post_recv_buf_count; /* posted rx count */ |
254 | atomic_t post_send_buf_count; /* posted tx count */ | 254 | atomic_t post_send_buf_count; /* posted tx count */ |
255 | atomic_t unexpected_pdu_count;/* count of received * | ||
256 | * unexpected pdus * | ||
257 | * not yet retired */ | ||
255 | char name[ISER_OBJECT_NAME_SIZE]; | 258 | char name[ISER_OBJECT_NAME_SIZE]; |
256 | struct iser_page_vec *page_vec; /* represents SG to fmr maps* | 259 | struct iser_page_vec *page_vec; /* represents SG to fmr maps* |
257 | * maps serialized as tx is*/ | 260 | * maps serialized as tx is*/ |
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index cdd283189047..ed1aff21b7ea 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -183,14 +183,8 @@ static int iser_post_receive_control(struct iscsi_conn *conn) | |||
183 | struct iser_regd_buf *regd_data; | 183 | struct iser_regd_buf *regd_data; |
184 | struct iser_dto *recv_dto = NULL; | 184 | struct iser_dto *recv_dto = NULL; |
185 | struct iser_device *device = iser_conn->ib_conn->device; | 185 | struct iser_device *device = iser_conn->ib_conn->device; |
186 | int rx_data_size, err = 0; | 186 | int rx_data_size, err; |
187 | 187 | int posts, outstanding_unexp_pdus; | |
188 | rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); | ||
189 | if (rx_desc == NULL) { | ||
190 | iser_err("Failed to alloc desc for post recv\n"); | ||
191 | return -ENOMEM; | ||
192 | } | ||
193 | rx_desc->type = ISCSI_RX; | ||
194 | 188 | ||
195 | /* for the login sequence we must support rx of upto 8K; login is done | 189 | /* for the login sequence we must support rx of upto 8K; login is done |
196 | * after conn create/bind (connect) and conn stop/bind (reconnect), | 190 | * after conn create/bind (connect) and conn stop/bind (reconnect), |
@@ -201,46 +195,80 @@ static int iser_post_receive_control(struct iscsi_conn *conn) | |||
201 | else /* FIXME till user space sets conn->max_recv_dlength correctly */ | 195 | else /* FIXME till user space sets conn->max_recv_dlength correctly */ |
202 | rx_data_size = 128; | 196 | rx_data_size = 128; |
203 | 197 | ||
204 | rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); | 198 | outstanding_unexp_pdus = |
205 | if (rx_desc->data == NULL) { | 199 | atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0); |
206 | iser_err("Failed to alloc data buf for post recv\n"); | ||
207 | err = -ENOMEM; | ||
208 | goto post_rx_kmalloc_failure; | ||
209 | } | ||
210 | 200 | ||
211 | recv_dto = &rx_desc->dto; | 201 | /* |
212 | recv_dto->ib_conn = iser_conn->ib_conn; | 202 | * in addition to the response buffer, replace those consumed by |
213 | recv_dto->regd_vector_len = 0; | 203 | * unexpected pdus. |
204 | */ | ||
205 | for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) { | ||
206 | rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); | ||
207 | if (rx_desc == NULL) { | ||
208 | iser_err("Failed to alloc desc for post recv %d\n", | ||
209 | posts); | ||
210 | err = -ENOMEM; | ||
211 | goto post_rx_cache_alloc_failure; | ||
212 | } | ||
213 | rx_desc->type = ISCSI_RX; | ||
214 | rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); | ||
215 | if (rx_desc->data == NULL) { | ||
216 | iser_err("Failed to alloc data buf for post recv %d\n", | ||
217 | posts); | ||
218 | err = -ENOMEM; | ||
219 | goto post_rx_kmalloc_failure; | ||
220 | } | ||
214 | 221 | ||
215 | regd_hdr = &rx_desc->hdr_regd_buf; | 222 | recv_dto = &rx_desc->dto; |
216 | memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); | 223 | recv_dto->ib_conn = iser_conn->ib_conn; |
217 | regd_hdr->device = device; | 224 | recv_dto->regd_vector_len = 0; |
218 | regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ | ||
219 | regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; | ||
220 | 225 | ||
221 | iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); | 226 | regd_hdr = &rx_desc->hdr_regd_buf; |
227 | memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); | ||
228 | regd_hdr->device = device; | ||
229 | regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ | ||
230 | regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; | ||
222 | 231 | ||
223 | iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); | 232 | iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); |
224 | 233 | ||
225 | regd_data = &rx_desc->data_regd_buf; | 234 | iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); |
226 | memset(regd_data, 0, sizeof(struct iser_regd_buf)); | ||
227 | regd_data->device = device; | ||
228 | regd_data->virt_addr = rx_desc->data; | ||
229 | regd_data->data_size = rx_data_size; | ||
230 | 235 | ||
231 | iser_reg_single(device, regd_data, DMA_FROM_DEVICE); | 236 | regd_data = &rx_desc->data_regd_buf; |
237 | memset(regd_data, 0, sizeof(struct iser_regd_buf)); | ||
238 | regd_data->device = device; | ||
239 | regd_data->virt_addr = rx_desc->data; | ||
240 | regd_data->data_size = rx_data_size; | ||
232 | 241 | ||
233 | iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); | 242 | iser_reg_single(device, regd_data, DMA_FROM_DEVICE); |
234 | 243 | ||
235 | err = iser_post_recv(rx_desc); | 244 | iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); |
236 | if (!err) | ||
237 | return 0; | ||
238 | 245 | ||
239 | /* iser_post_recv failed */ | 246 | err = iser_post_recv(rx_desc); |
247 | if (err) { | ||
248 | iser_err("Failed iser_post_recv for post %d\n", posts); | ||
249 | goto post_rx_post_recv_failure; | ||
250 | } | ||
251 | } | ||
252 | /* all posts successful */ | ||
253 | return 0; | ||
254 | |||
255 | post_rx_post_recv_failure: | ||
240 | iser_dto_buffs_release(recv_dto); | 256 | iser_dto_buffs_release(recv_dto); |
241 | kfree(rx_desc->data); | 257 | kfree(rx_desc->data); |
242 | post_rx_kmalloc_failure: | 258 | post_rx_kmalloc_failure: |
243 | kmem_cache_free(ig.desc_cache, rx_desc); | 259 | kmem_cache_free(ig.desc_cache, rx_desc); |
260 | post_rx_cache_alloc_failure: | ||
261 | if (posts > 0) { | ||
262 | /* | ||
263 | * response buffer posted, but did not replace all unexpected | ||
264 | * pdu recv bufs. Ignore error, retry occurs next send | ||
265 | */ | ||
266 | outstanding_unexp_pdus -= (posts - 1); | ||
267 | err = 0; | ||
268 | } | ||
269 | atomic_add(outstanding_unexp_pdus, | ||
270 | &iser_conn->ib_conn->unexpected_pdu_count); | ||
271 | |||
244 | return err; | 272 | return err; |
245 | } | 273 | } |
246 | 274 | ||
@@ -274,8 +302,10 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) | |||
274 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 302 | struct iscsi_iser_conn *iser_conn = conn->dd_data; |
275 | 303 | ||
276 | int i; | 304 | int i; |
277 | /* no need to keep it in a var, we are after login so if this should | 305 | /* |
278 | * be negotiated, by now the result should be available here */ | 306 | * FIXME this value should be declared to the target during login with |
307 | * the MaxOutstandingUnexpectedPDUs key when supported | ||
308 | */ | ||
279 | int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS; | 309 | int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS; |
280 | 310 | ||
281 | iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num); | 311 | iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num); |
@@ -478,6 +508,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
478 | int err = 0; | 508 | int err = 0; |
479 | struct iser_regd_buf *regd_buf; | 509 | struct iser_regd_buf *regd_buf; |
480 | struct iser_device *device; | 510 | struct iser_device *device; |
511 | unsigned char opcode; | ||
481 | 512 | ||
482 | if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { | 513 | if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { |
483 | iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); | 514 | iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); |
@@ -512,10 +543,15 @@ int iser_send_control(struct iscsi_conn *conn, | |||
512 | data_seg_len); | 543 | data_seg_len); |
513 | } | 544 | } |
514 | 545 | ||
515 | if (iser_post_receive_control(conn) != 0) { | 546 | opcode = task->hdr->opcode & ISCSI_OPCODE_MASK; |
516 | iser_err("post_rcv_buff failed!\n"); | 547 | |
517 | err = -ENOMEM; | 548 | /* post recv buffer for response if one is expected */ |
518 | goto send_control_error; | 549 | if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) { |
550 | if (iser_post_receive_control(conn) != 0) { | ||
551 | iser_err("post_rcv_buff failed!\n"); | ||
552 | err = -ENOMEM; | ||
553 | goto send_control_error; | ||
554 | } | ||
519 | } | 555 | } |
520 | 556 | ||
521 | err = iser_post_send(mdesc); | 557 | err = iser_post_send(mdesc); |
@@ -586,6 +622,20 @@ void iser_rcv_completion(struct iser_desc *rx_desc, | |||
586 | * parallel to the execution of iser_conn_term. So the code that waits * | 622 | * parallel to the execution of iser_conn_term. So the code that waits * |
587 | * for the posted rx bufs refcount to become zero handles everything */ | 623 | * for the posted rx bufs refcount to become zero handles everything */ |
588 | atomic_dec(&conn->ib_conn->post_recv_buf_count); | 624 | atomic_dec(&conn->ib_conn->post_recv_buf_count); |
625 | |||
626 | /* | ||
627 | * if an unexpected PDU was received then the recv wr consumed must | ||
628 | * be replaced, this is done in the next send of a control-type PDU | ||
629 | */ | ||
630 | if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) { | ||
631 | /* nop-in with itt = 0xffffffff */ | ||
632 | atomic_inc(&conn->ib_conn->unexpected_pdu_count); | ||
633 | } | ||
634 | else if (opcode == ISCSI_OP_ASYNC_EVENT) { | ||
635 | /* asyncronous message */ | ||
636 | atomic_inc(&conn->ib_conn->unexpected_pdu_count); | ||
637 | } | ||
638 | /* a reject PDU consumes the recv buf posted for the response */ | ||
589 | } | 639 | } |
590 | 640 | ||
591 | void iser_snd_completion(struct iser_desc *tx_desc) | 641 | void iser_snd_completion(struct iser_desc *tx_desc) |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index e418b960e334..319b188145be 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -498,6 +498,7 @@ void iser_conn_init(struct iser_conn *ib_conn) | |||
498 | init_waitqueue_head(&ib_conn->wait); | 498 | init_waitqueue_head(&ib_conn->wait); |
499 | atomic_set(&ib_conn->post_recv_buf_count, 0); | 499 | atomic_set(&ib_conn->post_recv_buf_count, 0); |
500 | atomic_set(&ib_conn->post_send_buf_count, 0); | 500 | atomic_set(&ib_conn->post_send_buf_count, 0); |
501 | atomic_set(&ib_conn->unexpected_pdu_count, 0); | ||
501 | atomic_set(&ib_conn->refcount, 1); | 502 | atomic_set(&ib_conn->refcount, 1); |
502 | INIT_LIST_HEAD(&ib_conn->conn_list); | 503 | INIT_LIST_HEAD(&ib_conn->conn_list); |
503 | spin_lock_init(&ib_conn->lock); | 504 | spin_lock_init(&ib_conn->lock); |
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index c600ab7f93e8..5c8a1bcf7ca7 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/input.h> | 20 | #include <linux/input.h> |
21 | #include <linux/clk.h> | ||
21 | #include <linux/io.h> | 22 | #include <linux/io.h> |
22 | #include <asm/sh_keysc.h> | 23 | #include <asm/sh_keysc.h> |
23 | 24 | ||
@@ -39,6 +40,7 @@ static const struct { | |||
39 | 40 | ||
40 | struct sh_keysc_priv { | 41 | struct sh_keysc_priv { |
41 | void __iomem *iomem_base; | 42 | void __iomem *iomem_base; |
43 | struct clk *clk; | ||
42 | unsigned long last_keys; | 44 | unsigned long last_keys; |
43 | struct input_dev *input; | 45 | struct input_dev *input; |
44 | struct sh_keysc_info pdata; | 46 | struct sh_keysc_info pdata; |
@@ -125,6 +127,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) | |||
125 | struct sh_keysc_info *pdata; | 127 | struct sh_keysc_info *pdata; |
126 | struct resource *res; | 128 | struct resource *res; |
127 | struct input_dev *input; | 129 | struct input_dev *input; |
130 | char clk_name[8]; | ||
128 | int i, k; | 131 | int i, k; |
129 | int irq, error; | 132 | int irq, error; |
130 | 133 | ||
@@ -165,11 +168,19 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) | |||
165 | goto err1; | 168 | goto err1; |
166 | } | 169 | } |
167 | 170 | ||
171 | snprintf(clk_name, sizeof(clk_name), "keysc%d", pdev->id); | ||
172 | priv->clk = clk_get(&pdev->dev, clk_name); | ||
173 | if (IS_ERR(priv->clk)) { | ||
174 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
175 | error = PTR_ERR(priv->clk); | ||
176 | goto err2; | ||
177 | } | ||
178 | |||
168 | priv->input = input_allocate_device(); | 179 | priv->input = input_allocate_device(); |
169 | if (!priv->input) { | 180 | if (!priv->input) { |
170 | dev_err(&pdev->dev, "failed to allocate input device\n"); | 181 | dev_err(&pdev->dev, "failed to allocate input device\n"); |
171 | error = -ENOMEM; | 182 | error = -ENOMEM; |
172 | goto err2; | 183 | goto err3; |
173 | } | 184 | } |
174 | 185 | ||
175 | input = priv->input; | 186 | input = priv->input; |
@@ -187,7 +198,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) | |||
187 | error = request_irq(irq, sh_keysc_isr, 0, pdev->name, pdev); | 198 | error = request_irq(irq, sh_keysc_isr, 0, pdev->name, pdev); |
188 | if (error) { | 199 | if (error) { |
189 | dev_err(&pdev->dev, "failed to request IRQ\n"); | 200 | dev_err(&pdev->dev, "failed to request IRQ\n"); |
190 | goto err3; | 201 | goto err4; |
191 | } | 202 | } |
192 | 203 | ||
193 | for (i = 0; i < SH_KEYSC_MAXKEYS; i++) { | 204 | for (i = 0; i < SH_KEYSC_MAXKEYS; i++) { |
@@ -199,18 +210,22 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) | |||
199 | error = input_register_device(input); | 210 | error = input_register_device(input); |
200 | if (error) { | 211 | if (error) { |
201 | dev_err(&pdev->dev, "failed to register input device\n"); | 212 | dev_err(&pdev->dev, "failed to register input device\n"); |
202 | goto err4; | 213 | goto err5; |
203 | } | 214 | } |
204 | 215 | ||
216 | clk_enable(priv->clk); | ||
217 | |||
205 | iowrite16((sh_keysc_mode[pdata->mode].kymd << 8) | | 218 | iowrite16((sh_keysc_mode[pdata->mode].kymd << 8) | |
206 | pdata->scan_timing, priv->iomem_base + KYCR1_OFFS); | 219 | pdata->scan_timing, priv->iomem_base + KYCR1_OFFS); |
207 | iowrite16(0, priv->iomem_base + KYOUTDR_OFFS); | 220 | iowrite16(0, priv->iomem_base + KYOUTDR_OFFS); |
208 | iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS); | 221 | iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS); |
209 | return 0; | 222 | return 0; |
210 | err4: | 223 | err5: |
211 | free_irq(irq, pdev); | 224 | free_irq(irq, pdev); |
212 | err3: | 225 | err4: |
213 | input_free_device(input); | 226 | input_free_device(input); |
227 | err3: | ||
228 | clk_put(priv->clk); | ||
214 | err2: | 229 | err2: |
215 | iounmap(priv->iomem_base); | 230 | iounmap(priv->iomem_base); |
216 | err1: | 231 | err1: |
@@ -230,6 +245,9 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev) | |||
230 | free_irq(platform_get_irq(pdev, 0), pdev); | 245 | free_irq(platform_get_irq(pdev, 0), pdev); |
231 | iounmap(priv->iomem_base); | 246 | iounmap(priv->iomem_base); |
232 | 247 | ||
248 | clk_disable(priv->clk); | ||
249 | clk_put(priv->clk); | ||
250 | |||
233 | platform_set_drvdata(pdev, NULL); | 251 | platform_set_drvdata(pdev, NULL); |
234 | kfree(priv); | 252 | kfree(priv); |
235 | return 0; | 253 | return 0; |
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c index 550e80f390a6..0aa66ec4cbdd 100644 --- a/drivers/isdn/capi/capifs.c +++ b/drivers/isdn/capi/capifs.c | |||
@@ -156,8 +156,8 @@ void capifs_new_ncci(unsigned int number, dev_t device) | |||
156 | if (!inode) | 156 | if (!inode) |
157 | return; | 157 | return; |
158 | inode->i_ino = number+2; | 158 | inode->i_ino = number+2; |
159 | inode->i_uid = config.setuid ? config.uid : current->fsuid; | 159 | inode->i_uid = config.setuid ? config.uid : current_fsuid(); |
160 | inode->i_gid = config.setgid ? config.gid : current->fsgid; | 160 | inode->i_gid = config.setgid ? config.gid : current_fsgid(); |
161 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 161 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
162 | init_special_inode(inode, S_IFCHR|config.mode, device); | 162 | init_special_inode(inode, S_IFCHR|config.mode, device); |
163 | //inode->i_op = &capifs_file_inode_operations; | 163 | //inode->i_op = &capifs_file_inode_operations; |
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 484299b031f8..8f9f4912de32 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c | |||
@@ -246,7 +246,8 @@ hysdn_conf_open(struct inode *ino, struct file *filep) | |||
246 | } | 246 | } |
247 | if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) | 247 | if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) |
248 | hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x", | 248 | hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x", |
249 | filep->f_uid, filep->f_gid, filep->f_mode); | 249 | filep->f_cred->fsuid, filep->f_cred->fsgid, |
250 | filep->f_mode); | ||
250 | 251 | ||
251 | if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { | 252 | if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { |
252 | /* write only access -> write boot file or conf line */ | 253 | /* write only access -> write boot file or conf line */ |
@@ -331,7 +332,8 @@ hysdn_conf_close(struct inode *ino, struct file *filep) | |||
331 | } | 332 | } |
332 | if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) | 333 | if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) |
333 | hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x", | 334 | hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x", |
334 | filep->f_uid, filep->f_gid, filep->f_mode); | 335 | filep->f_cred->fsuid, filep->f_cred->fsgid, |
336 | filep->f_mode); | ||
335 | 337 | ||
336 | if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { | 338 | if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { |
337 | /* write only access -> write boot file or conf line */ | 339 | /* write only access -> write boot file or conf line */ |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ac89a5deaca2..ab7c8e4a61f9 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -208,16 +208,19 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) | |||
208 | */ | 208 | */ |
209 | 209 | ||
210 | /* IO operations when bitmap is stored near all superblocks */ | 210 | /* IO operations when bitmap is stored near all superblocks */ |
211 | static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index) | 211 | static struct page *read_sb_page(mddev_t *mddev, long offset, |
212 | struct page *page, | ||
213 | unsigned long index, int size) | ||
212 | { | 214 | { |
213 | /* choose a good rdev and read the page from there */ | 215 | /* choose a good rdev and read the page from there */ |
214 | 216 | ||
215 | mdk_rdev_t *rdev; | 217 | mdk_rdev_t *rdev; |
216 | struct list_head *tmp; | 218 | struct list_head *tmp; |
217 | struct page *page = alloc_page(GFP_KERNEL); | ||
218 | sector_t target; | 219 | sector_t target; |
219 | 220 | ||
220 | if (!page) | 221 | if (!page) |
222 | page = alloc_page(GFP_KERNEL); | ||
223 | if (!page) | ||
221 | return ERR_PTR(-ENOMEM); | 224 | return ERR_PTR(-ENOMEM); |
222 | 225 | ||
223 | rdev_for_each(rdev, tmp, mddev) { | 226 | rdev_for_each(rdev, tmp, mddev) { |
@@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde | |||
227 | 230 | ||
228 | target = rdev->sb_start + offset + index * (PAGE_SIZE/512); | 231 | target = rdev->sb_start + offset + index * (PAGE_SIZE/512); |
229 | 232 | ||
230 | if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) { | 233 | if (sync_page_io(rdev->bdev, target, |
234 | roundup(size, bdev_hardsect_size(rdev->bdev)), | ||
235 | page, READ)) { | ||
231 | page->index = index; | 236 | page->index = index; |
232 | attach_page_buffers(page, NULL); /* so that free_buffer will | 237 | attach_page_buffers(page, NULL); /* so that free_buffer will |
233 | * quietly no-op */ | 238 | * quietly no-op */ |
@@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap) | |||
544 | 549 | ||
545 | bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); | 550 | bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); |
546 | } else { | 551 | } else { |
547 | bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0); | 552 | bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, |
553 | NULL, | ||
554 | 0, sizeof(bitmap_super_t)); | ||
548 | } | 555 | } |
549 | if (IS_ERR(bitmap->sb_page)) { | 556 | if (IS_ERR(bitmap->sb_page)) { |
550 | err = PTR_ERR(bitmap->sb_page); | 557 | err = PTR_ERR(bitmap->sb_page); |
@@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
957 | */ | 964 | */ |
958 | page = bitmap->sb_page; | 965 | page = bitmap->sb_page; |
959 | offset = sizeof(bitmap_super_t); | 966 | offset = sizeof(bitmap_super_t); |
967 | read_sb_page(bitmap->mddev, bitmap->offset, | ||
968 | page, | ||
969 | index, count); | ||
960 | } else if (file) { | 970 | } else if (file) { |
961 | page = read_page(file, index, bitmap, count); | 971 | page = read_page(file, index, bitmap, count); |
962 | offset = 0; | 972 | offset = 0; |
963 | } else { | 973 | } else { |
964 | page = read_sb_page(bitmap->mddev, bitmap->offset, index); | 974 | page = read_sb_page(bitmap->mddev, bitmap->offset, |
975 | NULL, | ||
976 | index, count); | ||
965 | offset = 0; | 977 | offset = 0; |
966 | } | 978 | } |
967 | if (IS_ERR(page)) { /* read error */ | 979 | if (IS_ERR(page)) { /* read error */ |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c99e4728ff41..343094c3feeb 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/idr.h> | 21 | #include <linux/idr.h> |
22 | #include <linux/hdreg.h> | 22 | #include <linux/hdreg.h> |
23 | #include <linux/blktrace_api.h> | 23 | #include <linux/blktrace_api.h> |
24 | #include <trace/block.h> | ||
24 | 25 | ||
25 | #define DM_MSG_PREFIX "core" | 26 | #define DM_MSG_PREFIX "core" |
26 | 27 | ||
@@ -51,6 +52,8 @@ struct dm_target_io { | |||
51 | union map_info info; | 52 | union map_info info; |
52 | }; | 53 | }; |
53 | 54 | ||
55 | DEFINE_TRACE(block_bio_complete); | ||
56 | |||
54 | union map_info *dm_get_mapinfo(struct bio *bio) | 57 | union map_info *dm_get_mapinfo(struct bio *bio) |
55 | { | 58 | { |
56 | if (bio && bio->bi_private) | 59 | if (bio && bio->bi_private) |
@@ -504,8 +507,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
504 | end_io_acct(io); | 507 | end_io_acct(io); |
505 | 508 | ||
506 | if (io->error != DM_ENDIO_REQUEUE) { | 509 | if (io->error != DM_ENDIO_REQUEUE) { |
507 | blk_add_trace_bio(io->md->queue, io->bio, | 510 | trace_block_bio_complete(io->md->queue, io->bio); |
508 | BLK_TA_COMPLETE); | ||
509 | 511 | ||
510 | bio_endio(io->bio, io->error); | 512 | bio_endio(io->bio, io->error); |
511 | } | 513 | } |
@@ -598,7 +600,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, | |||
598 | if (r == DM_MAPIO_REMAPPED) { | 600 | if (r == DM_MAPIO_REMAPPED) { |
599 | /* the bio has been remapped so dispatch it */ | 601 | /* the bio has been remapped so dispatch it */ |
600 | 602 | ||
601 | blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, | 603 | trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, |
602 | tio->io->bio->bi_bdev->bd_dev, | 604 | tio->io->bio->bi_bdev->bd_dev, |
603 | clone->bi_sector, sector); | 605 | clone->bi_sector, sector); |
604 | 606 | ||
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig index 73dc2ee9b014..b34301d56cd2 100644 --- a/drivers/media/dvb/b2c2/Kconfig +++ b/drivers/media/dvb/b2c2/Kconfig | |||
@@ -9,11 +9,11 @@ config DVB_B2C2_FLEXCOP | |||
9 | select DVB_STV0297 if !DVB_FE_CUSTOMISE | 9 | select DVB_STV0297 if !DVB_FE_CUSTOMISE |
10 | select DVB_BCM3510 if !DVB_FE_CUSTOMISE | 10 | select DVB_BCM3510 if !DVB_FE_CUSTOMISE |
11 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE | 11 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE |
12 | select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE | ||
13 | select DVB_S5H1420 if !DVB_FE_CUSTOMISE | 12 | select DVB_S5H1420 if !DVB_FE_CUSTOMISE |
14 | select DVB_TUNER_ITD1000 if !DVB_FE_CUSTOMISE | 13 | select DVB_TUNER_ITD1000 if !DVB_FE_CUSTOMISE |
15 | select DVB_ISL6421 if !DVB_FE_CUSTOMISE | 14 | select DVB_ISL6421 if !DVB_FE_CUSTOMISE |
16 | select DVB_CX24123 if !DVB_FE_CUSTOMISE | 15 | select DVB_CX24123 if !DVB_FE_CUSTOMISE |
16 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE | ||
17 | help | 17 | help |
18 | Support for the digital TV receiver chip made by B2C2 Inc. included in | 18 | Support for the digital TV receiver chip made by B2C2 Inc. included in |
19 | Technisats PCI cards and USB boxes. | 19 | Technisats PCI cards and USB boxes. |
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c index a127a4175c40..5cded3708541 100644 --- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c | |||
@@ -628,12 +628,14 @@ int flexcop_frontend_init(struct flexcop_device *fc) | |||
628 | } | 628 | } |
629 | 629 | ||
630 | /* try the cable dvb (stv0297) */ | 630 | /* try the cable dvb (stv0297) */ |
631 | fc->fc_i2c_adap[0].no_base_addr = 1; | ||
631 | fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c); | 632 | fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c); |
632 | if (fc->fe != NULL) { | 633 | if (fc->fe != NULL) { |
633 | fc->dev_type = FC_CABLE; | 634 | fc->dev_type = FC_CABLE; |
634 | fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; | 635 | fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; |
635 | goto fe_found; | 636 | goto fe_found; |
636 | } | 637 | } |
638 | fc->fc_i2c_adap[0].no_base_addr = 0; | ||
637 | 639 | ||
638 | /* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */ | 640 | /* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */ |
639 | fc->fe = dvb_attach(mt312_attach, | 641 | fc->fe = dvb_attach(mt312_attach, |
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c index 43a112ec6d44..f13783f08f0f 100644 --- a/drivers/media/dvb/b2c2/flexcop-i2c.c +++ b/drivers/media/dvb/b2c2/flexcop-i2c.c | |||
@@ -47,9 +47,13 @@ static int flexcop_i2c_read4(struct flexcop_i2c_adapter *i2c, | |||
47 | int len = r100.tw_sm_c_100.total_bytes, /* remember total_bytes is buflen-1 */ | 47 | int len = r100.tw_sm_c_100.total_bytes, /* remember total_bytes is buflen-1 */ |
48 | ret; | 48 | ret; |
49 | 49 | ||
50 | r100.tw_sm_c_100.no_base_addr_ack_error = i2c->no_base_addr; | ||
51 | ret = flexcop_i2c_operation(i2c->fc, &r100); | 50 | ret = flexcop_i2c_operation(i2c->fc, &r100); |
52 | if (ret != 0) { | 51 | if (ret != 0) { |
52 | deb_i2c("Retrying operation\n"); | ||
53 | r100.tw_sm_c_100.no_base_addr_ack_error = i2c->no_base_addr; | ||
54 | ret = flexcop_i2c_operation(i2c->fc, &r100); | ||
55 | } | ||
56 | if (ret != 0) { | ||
53 | deb_i2c("read failed. %d\n", ret); | 57 | deb_i2c("read failed. %d\n", ret); |
54 | return ret; | 58 | return ret; |
55 | } | 59 | } |
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig index 7e9c090fc04e..27edb0ece587 100644 --- a/drivers/media/dvb/bt8xx/Kconfig +++ b/drivers/media/dvb/bt8xx/Kconfig | |||
@@ -8,7 +8,7 @@ config DVB_BT8XX | |||
8 | select DVB_OR51211 if !DVB_FE_CUSTOMISE | 8 | select DVB_OR51211 if !DVB_FE_CUSTOMISE |
9 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE | 9 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE |
10 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 10 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
11 | select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE | 11 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE |
12 | help | 12 | help |
13 | Support for PCI cards based on the Bt8xx PCI bridge. Examples are | 13 | Support for PCI cards based on the Bt8xx PCI bridge. Examples are |
14 | the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards, | 14 | the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards, |
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 62b68c291d99..49f7b20c25d6 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig | |||
@@ -24,8 +24,8 @@ config DVB_USB_A800 | |||
24 | tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)" | 24 | tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)" |
25 | depends on DVB_USB | 25 | depends on DVB_USB |
26 | select DVB_DIB3000MC | 26 | select DVB_DIB3000MC |
27 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | ||
28 | select DVB_PLL if !DVB_FE_CUSTOMISE | 27 | select DVB_PLL if !DVB_FE_CUSTOMISE |
28 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE | ||
29 | help | 29 | help |
30 | Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver. | 30 | Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver. |
31 | 31 | ||
@@ -34,7 +34,7 @@ config DVB_USB_DIBUSB_MB | |||
34 | depends on DVB_USB | 34 | depends on DVB_USB |
35 | select DVB_PLL if !DVB_FE_CUSTOMISE | 35 | select DVB_PLL if !DVB_FE_CUSTOMISE |
36 | select DVB_DIB3000MB | 36 | select DVB_DIB3000MB |
37 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | 37 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE |
38 | help | 38 | help |
39 | Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by | 39 | Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by |
40 | DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator. | 40 | DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator. |
@@ -55,7 +55,7 @@ config DVB_USB_DIBUSB_MC | |||
55 | tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)" | 55 | tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)" |
56 | depends on DVB_USB | 56 | depends on DVB_USB |
57 | select DVB_DIB3000MC | 57 | select DVB_DIB3000MC |
58 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | 58 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE |
59 | help | 59 | help |
60 | Support for USB2.0 DVB-T receivers based on reference designs made by | 60 | Support for USB2.0 DVB-T receivers based on reference designs made by |
61 | DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator. | 61 | DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator. |
@@ -73,11 +73,11 @@ config DVB_USB_DIB0700 | |||
73 | select DVB_DIB7000M | 73 | select DVB_DIB7000M |
74 | select DVB_DIB3000MC | 74 | select DVB_DIB3000MC |
75 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE | 75 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE |
76 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | ||
77 | select MEDIA_TUNER_MT2266 if !DVB_FE_CUSTOMISE | ||
78 | select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMISE | ||
79 | select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE | ||
80 | select DVB_TUNER_DIB0070 | 76 | select DVB_TUNER_DIB0070 |
77 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE | ||
78 | select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMIZE | ||
79 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE | ||
80 | select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMIZE | ||
81 | help | 81 | help |
82 | Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The | 82 | Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The |
83 | USB bridge is also present in devices having the DiB7700 DVB-T-USB | 83 | USB bridge is also present in devices having the DiB7700 DVB-T-USB |
@@ -95,7 +95,7 @@ config DVB_USB_UMT_010 | |||
95 | depends on DVB_USB | 95 | depends on DVB_USB |
96 | select DVB_PLL if !DVB_FE_CUSTOMISE | 96 | select DVB_PLL if !DVB_FE_CUSTOMISE |
97 | select DVB_DIB3000MC | 97 | select DVB_DIB3000MC |
98 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | 98 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE |
99 | help | 99 | help |
100 | Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver. | 100 | Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver. |
101 | 101 | ||
@@ -107,11 +107,11 @@ config DVB_USB_CXUSB | |||
107 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE | 107 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE |
108 | select DVB_MT352 if !DVB_FE_CUSTOMISE | 108 | select DVB_MT352 if !DVB_FE_CUSTOMISE |
109 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 109 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
110 | select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE | ||
111 | select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMISE | ||
112 | select MEDIA_TUNER_MXL5005S if !DVB_FE_CUSTOMISE | ||
113 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE | 110 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE |
114 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE | 111 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE |
112 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE | ||
113 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE | ||
114 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMIZE | ||
115 | help | 115 | help |
116 | Say Y here to support the Conexant USB2.0 hybrid reference design. | 116 | Say Y here to support the Conexant USB2.0 hybrid reference design. |
117 | Currently, only DVB and ATSC modes are supported, analog mode | 117 | Currently, only DVB and ATSC modes are supported, analog mode |
@@ -124,9 +124,9 @@ config DVB_USB_M920X | |||
124 | tristate "Uli m920x DVB-T USB2.0 support" | 124 | tristate "Uli m920x DVB-T USB2.0 support" |
125 | depends on DVB_USB | 125 | depends on DVB_USB |
126 | select DVB_MT352 if !DVB_FE_CUSTOMISE | 126 | select DVB_MT352 if !DVB_FE_CUSTOMISE |
127 | select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE | ||
128 | select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE | ||
129 | select DVB_TDA1004X if !DVB_FE_CUSTOMISE | 127 | select DVB_TDA1004X if !DVB_FE_CUSTOMISE |
128 | select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE | ||
129 | select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE | ||
130 | help | 130 | help |
131 | Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver. | 131 | Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver. |
132 | Currently, only devices with a product id of | 132 | Currently, only devices with a product id of |
@@ -137,7 +137,7 @@ config DVB_USB_GL861 | |||
137 | tristate "Genesys Logic GL861 USB2.0 support" | 137 | tristate "Genesys Logic GL861 USB2.0 support" |
138 | depends on DVB_USB | 138 | depends on DVB_USB |
139 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 139 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
140 | select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE | 140 | select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE |
141 | help | 141 | help |
142 | Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0 | 142 | Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0 |
143 | receiver with USB ID 0db0:5581. | 143 | receiver with USB ID 0db0:5581. |
@@ -146,7 +146,7 @@ config DVB_USB_AU6610 | |||
146 | tristate "Alcor Micro AU6610 USB2.0 support" | 146 | tristate "Alcor Micro AU6610 USB2.0 support" |
147 | depends on DVB_USB | 147 | depends on DVB_USB |
148 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 148 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
149 | select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE | 149 | select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE |
150 | help | 150 | help |
151 | Say Y here to support the Sigmatek DVB-110 DVB-T USB2.0 receiver. | 151 | Say Y here to support the Sigmatek DVB-110 DVB-T USB2.0 receiver. |
152 | 152 | ||
@@ -198,8 +198,8 @@ config DVB_USB_NOVA_T_USB2 | |||
198 | tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support" | 198 | tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support" |
199 | depends on DVB_USB | 199 | depends on DVB_USB |
200 | select DVB_DIB3000MC | 200 | select DVB_DIB3000MC |
201 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | ||
202 | select DVB_PLL if !DVB_FE_CUSTOMISE | 201 | select DVB_PLL if !DVB_FE_CUSTOMISE |
202 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE | ||
203 | help | 203 | help |
204 | Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver. | 204 | Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver. |
205 | 205 | ||
@@ -235,8 +235,8 @@ config DVB_USB_OPERA1 | |||
235 | config DVB_USB_AF9005 | 235 | config DVB_USB_AF9005 |
236 | tristate "Afatech AF9005 DVB-T USB1.1 support" | 236 | tristate "Afatech AF9005 DVB-T USB1.1 support" |
237 | depends on DVB_USB && EXPERIMENTAL | 237 | depends on DVB_USB && EXPERIMENTAL |
238 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | 238 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE |
239 | select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE | 239 | select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE |
240 | help | 240 | help |
241 | Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver | 241 | Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver |
242 | and the TerraTec Cinergy T USB XE (Rev.1) | 242 | and the TerraTec Cinergy T USB XE (Rev.1) |
@@ -284,7 +284,7 @@ config DVB_USB_DTV5100 | |||
284 | tristate "AME DTV-5100 USB2.0 DVB-T support" | 284 | tristate "AME DTV-5100 USB2.0 DVB-T support" |
285 | depends on DVB_USB | 285 | depends on DVB_USB |
286 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 286 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
287 | select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE | 287 | select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE |
288 | help | 288 | help |
289 | Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver. | 289 | Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver. |
290 | 290 | ||
@@ -293,9 +293,9 @@ config DVB_USB_AF9015 | |||
293 | depends on DVB_USB && EXPERIMENTAL | 293 | depends on DVB_USB && EXPERIMENTAL |
294 | select DVB_AF9013 | 294 | select DVB_AF9013 |
295 | select DVB_PLL if !DVB_FE_CUSTOMISE | 295 | select DVB_PLL if !DVB_FE_CUSTOMISE |
296 | select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE | 296 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE |
297 | select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE | 297 | select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE |
298 | select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMISE | 298 | select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMIZE |
299 | select MEDIA_TUNER_MXL5005S if !DVB_FE_CUSTOMISE | 299 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMIZE |
300 | help | 300 | help |
301 | Say Y here to support the Afatech AF9015 based DVB-T USB2.0 receiver | 301 | Say Y here to support the Afatech AF9015 based DVB-T USB2.0 receiver |
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index f28d3ae59e04..391732788911 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c | |||
@@ -446,13 +446,13 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap) | |||
446 | == NULL ? -ENODEV : 0; | 446 | == NULL ? -ENODEV : 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | #define DEFAULT_RC_INTERVAL 150 | 449 | #define DEFAULT_RC_INTERVAL 50 |
450 | 450 | ||
451 | static u8 rc_request[] = { REQUEST_POLL_RC, 0 }; | 451 | static u8 rc_request[] = { REQUEST_POLL_RC, 0 }; |
452 | 452 | ||
453 | /* Number of keypresses to ignore before start repeating */ | 453 | /* Number of keypresses to ignore before start repeating */ |
454 | #define RC_REPEAT_DELAY 2 | 454 | #define RC_REPEAT_DELAY 6 |
455 | #define RC_REPEAT_DELAY_V1_20 5 | 455 | #define RC_REPEAT_DELAY_V1_20 10 |
456 | 456 | ||
457 | 457 | ||
458 | 458 | ||
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig index 867027ceab3e..401a04effc06 100644 --- a/drivers/media/dvb/ttpci/Kconfig +++ b/drivers/media/dvb/ttpci/Kconfig | |||
@@ -106,7 +106,7 @@ config DVB_BUDGET_CI | |||
106 | select DVB_TDA1004X if !DVB_FE_CUSTOMISE | 106 | select DVB_TDA1004X if !DVB_FE_CUSTOMISE |
107 | select DVB_LNBP21 if !DVB_FE_CUSTOMISE | 107 | select DVB_LNBP21 if !DVB_FE_CUSTOMISE |
108 | select DVB_TDA10023 if !DVB_FE_CUSTOMISE | 108 | select DVB_TDA10023 if !DVB_FE_CUSTOMISE |
109 | select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE | 109 | select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE |
110 | select VIDEO_IR | 110 | select VIDEO_IR |
111 | help | 111 | help |
112 | Support for simple SAA7146 based DVB cards | 112 | Support for simple SAA7146 based DVB cards |
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 47102c2c8250..057fd7e160c4 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -759,7 +759,7 @@ config VIDEO_PXA27x | |||
759 | 759 | ||
760 | config VIDEO_SH_MOBILE_CEU | 760 | config VIDEO_SH_MOBILE_CEU |
761 | tristate "SuperH Mobile CEU Interface driver" | 761 | tristate "SuperH Mobile CEU Interface driver" |
762 | depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA | 762 | depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK |
763 | select VIDEOBUF_DMA_CONTIG | 763 | select VIDEOBUF_DMA_CONTIG |
764 | ---help--- | 764 | ---help--- |
765 | This is a v4l2 driver for the SuperH Mobile CEU Interface | 765 | This is a v4l2 driver for the SuperH Mobile CEU Interface |
diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/compat_ioctl32.c index e6ca4012b5f0..0ea85a05e5c0 100644 --- a/drivers/media/video/compat_ioctl32.c +++ b/drivers/media/video/compat_ioctl32.c | |||
@@ -831,7 +831,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) | |||
831 | { | 831 | { |
832 | int ret = -ENOIOCTLCMD; | 832 | int ret = -ENOIOCTLCMD; |
833 | 833 | ||
834 | if (!file->f_op->ioctl) | 834 | if (!file->f_op->ioctl && !file->f_op->unlocked_ioctl) |
835 | return ret; | 835 | return ret; |
836 | 836 | ||
837 | switch (cmd) { | 837 | switch (cmd) { |
diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig index ef48565de7f1..8940b5387dec 100644 --- a/drivers/media/video/cx18/Kconfig +++ b/drivers/media/video/cx18/Kconfig | |||
@@ -9,7 +9,7 @@ config VIDEO_CX18 | |||
9 | select VIDEO_CX2341X | 9 | select VIDEO_CX2341X |
10 | select VIDEO_CS5345 | 10 | select VIDEO_CS5345 |
11 | select DVB_S5H1409 if !DVB_FE_CUSTOMISE | 11 | select DVB_S5H1409 if !DVB_FE_CUSTOMISE |
12 | select MEDIA_TUNER_MXL5005S if !DVB_FE_CUSTOMISE | 12 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMIZE |
13 | ---help--- | 13 | ---help--- |
14 | This is a video4linux driver for Conexant cx23418 based | 14 | This is a video4linux driver for Conexant cx23418 based |
15 | PCI combo video recorder devices. | 15 | PCI combo video recorder devices. |
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig index 8c1b7fa47a41..00f1e2e8889e 100644 --- a/drivers/media/video/cx23885/Kconfig +++ b/drivers/media/video/cx23885/Kconfig | |||
@@ -11,16 +11,16 @@ config VIDEO_CX23885 | |||
11 | select VIDEO_CX25840 | 11 | select VIDEO_CX25840 |
12 | select VIDEO_CX2341X | 12 | select VIDEO_CX2341X |
13 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE | 13 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE |
14 | select MEDIA_TUNER_MT2131 if !DVB_FE_CUSTOMISE | ||
15 | select DVB_S5H1409 if !DVB_FE_CUSTOMISE | 14 | select DVB_S5H1409 if !DVB_FE_CUSTOMISE |
16 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE | 15 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE |
17 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE | 16 | select DVB_LGDT330X if !DVB_FE_CUSTOMISE |
18 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 17 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
18 | select DVB_TDA10048 if !DVB_FE_CUSTOMIZE | ||
19 | select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMIZE | ||
19 | select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMIZE | 20 | select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMIZE |
20 | select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE | 21 | select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE |
21 | select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE | 22 | select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE |
22 | select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE | 23 | select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE |
23 | select DVB_TDA10048 if !DVB_FE_CUSTOMIZE | ||
24 | ---help--- | 24 | ---help--- |
25 | This is a video4linux driver for Conexant 23885 based | 25 | This is a video4linux driver for Conexant 23885 based |
26 | TV cards. | 26 | TV cards. |
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig index 0b9e5fac6239..b0f837588e01 100644 --- a/drivers/media/video/cx88/Kconfig +++ b/drivers/media/video/cx88/Kconfig | |||
@@ -56,12 +56,12 @@ config VIDEO_CX88_DVB | |||
56 | select DVB_NXT200X if !DVB_FE_CUSTOMISE | 56 | select DVB_NXT200X if !DVB_FE_CUSTOMISE |
57 | select DVB_CX24123 if !DVB_FE_CUSTOMISE | 57 | select DVB_CX24123 if !DVB_FE_CUSTOMISE |
58 | select DVB_ISL6421 if !DVB_FE_CUSTOMISE | 58 | select DVB_ISL6421 if !DVB_FE_CUSTOMISE |
59 | select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE | ||
60 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE | 59 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE |
61 | select DVB_CX24116 if !DVB_FE_CUSTOMISE | 60 | select DVB_CX24116 if !DVB_FE_CUSTOMISE |
62 | select DVB_STV0299 if !DVB_FE_CUSTOMISE | 61 | select DVB_STV0299 if !DVB_FE_CUSTOMISE |
63 | select DVB_STV0288 if !DVB_FE_CUSTOMISE | 62 | select DVB_STV0288 if !DVB_FE_CUSTOMISE |
64 | select DVB_STB6000 if !DVB_FE_CUSTOMISE | 63 | select DVB_STB6000 if !DVB_FE_CUSTOMISE |
64 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE | ||
65 | ---help--- | 65 | ---help--- |
66 | This adds support for DVB/ATSC cards based on the | 66 | This adds support for DVB/ATSC cards based on the |
67 | Conexant 2388x chip. | 67 | Conexant 2388x chip. |
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index 610f535a257c..4ea1f1e04897 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c | |||
@@ -549,10 +549,11 @@ static int em28xx_config(struct em28xx *dev) | |||
549 | static void em28xx_config_i2c(struct em28xx *dev) | 549 | static void em28xx_config_i2c(struct em28xx *dev) |
550 | { | 550 | { |
551 | struct v4l2_routing route; | 551 | struct v4l2_routing route; |
552 | int zero = 0; | ||
552 | 553 | ||
553 | route.input = INPUT(dev->ctl_input)->vmux; | 554 | route.input = INPUT(dev->ctl_input)->vmux; |
554 | route.output = 0; | 555 | route.output = 0; |
555 | em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, NULL); | 556 | em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, &zero); |
556 | em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route); | 557 | em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route); |
557 | em28xx_i2c_call_clients(dev, VIDIOC_STREAMON, NULL); | 558 | em28xx_i2c_call_clients(dev, VIDIOC_STREAMON, NULL); |
558 | } | 559 | } |
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 748a87e82e44..02a6e9ef0337 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c | |||
@@ -1264,10 +1264,10 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv, | |||
1264 | struct gspca_dev *gspca_dev = priv; | 1264 | struct gspca_dev *gspca_dev = priv; |
1265 | int ret; | 1265 | int ret; |
1266 | 1266 | ||
1267 | if (mutex_lock_interruptible(&gspca_dev->usb_lock)) | ||
1268 | return -ERESTARTSYS; | ||
1269 | if (!gspca_dev->sd_desc->set_jcomp) | 1267 | if (!gspca_dev->sd_desc->set_jcomp) |
1270 | return -EINVAL; | 1268 | return -EINVAL; |
1269 | if (mutex_lock_interruptible(&gspca_dev->usb_lock)) | ||
1270 | return -ERESTARTSYS; | ||
1271 | ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp); | 1271 | ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp); |
1272 | mutex_unlock(&gspca_dev->usb_lock); | 1272 | mutex_unlock(&gspca_dev->usb_lock); |
1273 | return ret; | 1273 | return ret; |
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig index 19eb274c9cd0..854c2a885358 100644 --- a/drivers/media/video/pvrusb2/Kconfig +++ b/drivers/media/video/pvrusb2/Kconfig | |||
@@ -42,7 +42,7 @@ config VIDEO_PVRUSB2_DVB | |||
42 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE | 42 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE |
43 | select DVB_TDA10048 if !DVB_FE_CUSTOMIZE | 43 | select DVB_TDA10048 if !DVB_FE_CUSTOMIZE |
44 | select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE | 44 | select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE |
45 | select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE | 45 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE |
46 | select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE | 46 | select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE |
47 | ---help--- | 47 | ---help--- |
48 | 48 | ||
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig index 7021bbf5897b..fc2164e28e76 100644 --- a/drivers/media/video/saa7134/Kconfig +++ b/drivers/media/video/saa7134/Kconfig | |||
@@ -34,9 +34,9 @@ config VIDEO_SAA7134_DVB | |||
34 | select DVB_NXT200X if !DVB_FE_CUSTOMISE | 34 | select DVB_NXT200X if !DVB_FE_CUSTOMISE |
35 | select DVB_TDA10086 if !DVB_FE_CUSTOMISE | 35 | select DVB_TDA10086 if !DVB_FE_CUSTOMISE |
36 | select DVB_TDA826X if !DVB_FE_CUSTOMISE | 36 | select DVB_TDA826X if !DVB_FE_CUSTOMISE |
37 | select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE | ||
38 | select DVB_ISL6421 if !DVB_FE_CUSTOMISE | 37 | select DVB_ISL6421 if !DVB_FE_CUSTOMISE |
39 | select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE | 38 | select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE |
39 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE | ||
40 | ---help--- | 40 | ---help--- |
41 | This adds support for DVB cards based on the | 41 | This adds support for DVB cards based on the |
42 | Philips saa7134 chip. | 42 | Philips saa7134 chip. |
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 2407607f2eff..536b1a9b310c 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
33 | #include <linux/videodev2.h> | 33 | #include <linux/videodev2.h> |
34 | #include <linux/clk.h> | ||
34 | 35 | ||
35 | #include <media/v4l2-common.h> | 36 | #include <media/v4l2-common.h> |
36 | #include <media/v4l2-dev.h> | 37 | #include <media/v4l2-dev.h> |
@@ -89,6 +90,7 @@ struct sh_mobile_ceu_dev { | |||
89 | 90 | ||
90 | unsigned int irq; | 91 | unsigned int irq; |
91 | void __iomem *base; | 92 | void __iomem *base; |
93 | struct clk *clk; | ||
92 | unsigned long video_limit; | 94 | unsigned long video_limit; |
93 | 95 | ||
94 | /* lock used to protect videobuf */ | 96 | /* lock used to protect videobuf */ |
@@ -309,6 +311,8 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) | |||
309 | if (ret) | 311 | if (ret) |
310 | goto err; | 312 | goto err; |
311 | 313 | ||
314 | clk_enable(pcdev->clk); | ||
315 | |||
312 | ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ | 316 | ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ |
313 | while (ceu_read(pcdev, CSTSR) & 1) | 317 | while (ceu_read(pcdev, CSTSR) & 1) |
314 | msleep(1); | 318 | msleep(1); |
@@ -342,6 +346,8 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) | |||
342 | } | 346 | } |
343 | spin_unlock_irqrestore(&pcdev->lock, flags); | 347 | spin_unlock_irqrestore(&pcdev->lock, flags); |
344 | 348 | ||
349 | clk_disable(pcdev->clk); | ||
350 | |||
345 | icd->ops->release(icd); | 351 | icd->ops->release(icd); |
346 | 352 | ||
347 | dev_info(&icd->dev, | 353 | dev_info(&icd->dev, |
@@ -550,6 +556,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) | |||
550 | struct sh_mobile_ceu_dev *pcdev; | 556 | struct sh_mobile_ceu_dev *pcdev; |
551 | struct resource *res; | 557 | struct resource *res; |
552 | void __iomem *base; | 558 | void __iomem *base; |
559 | char clk_name[8]; | ||
553 | unsigned int irq; | 560 | unsigned int irq; |
554 | int err = 0; | 561 | int err = 0; |
555 | 562 | ||
@@ -615,6 +622,14 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) | |||
615 | goto exit_release_mem; | 622 | goto exit_release_mem; |
616 | } | 623 | } |
617 | 624 | ||
625 | snprintf(clk_name, sizeof(clk_name), "ceu%d", pdev->id); | ||
626 | pcdev->clk = clk_get(&pdev->dev, clk_name); | ||
627 | if (IS_ERR(pcdev->clk)) { | ||
628 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
629 | err = PTR_ERR(pcdev->clk); | ||
630 | goto exit_free_irq; | ||
631 | } | ||
632 | |||
618 | pcdev->ici.priv = pcdev; | 633 | pcdev->ici.priv = pcdev; |
619 | pcdev->ici.dev.parent = &pdev->dev; | 634 | pcdev->ici.dev.parent = &pdev->dev; |
620 | pcdev->ici.nr = pdev->id; | 635 | pcdev->ici.nr = pdev->id; |
@@ -623,10 +638,12 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) | |||
623 | 638 | ||
624 | err = soc_camera_host_register(&pcdev->ici); | 639 | err = soc_camera_host_register(&pcdev->ici); |
625 | if (err) | 640 | if (err) |
626 | goto exit_free_irq; | 641 | goto exit_free_clk; |
627 | 642 | ||
628 | return 0; | 643 | return 0; |
629 | 644 | ||
645 | exit_free_clk: | ||
646 | clk_put(pcdev->clk); | ||
630 | exit_free_irq: | 647 | exit_free_irq: |
631 | free_irq(pcdev->irq, pcdev); | 648 | free_irq(pcdev->irq, pcdev); |
632 | exit_release_mem: | 649 | exit_release_mem: |
@@ -645,6 +662,7 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev) | |||
645 | struct sh_mobile_ceu_dev *pcdev = platform_get_drvdata(pdev); | 662 | struct sh_mobile_ceu_dev *pcdev = platform_get_drvdata(pdev); |
646 | 663 | ||
647 | soc_camera_host_unregister(&pcdev->ici); | 664 | soc_camera_host_unregister(&pcdev->ici); |
665 | clk_put(pcdev->clk); | ||
648 | free_irq(pcdev->irq, pcdev); | 666 | free_irq(pcdev->irq, pcdev); |
649 | if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) | 667 | if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) |
650 | dma_release_declared_memory(&pdev->dev); | 668 | dma_release_declared_memory(&pdev->dev); |
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index d62fd4f6b52e..ee090413e598 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -2008,6 +2008,9 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) | |||
2008 | return FAILED; | 2008 | return FAILED; |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | /* make sure we have no outstanding commands at this stage */ | ||
2012 | mptscsih_flush_running_cmds(hd); | ||
2013 | |||
2011 | ioc = hd->ioc; | 2014 | ioc = hd->ioc; |
2012 | printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n", | 2015 | printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n", |
2013 | ioc->name, SCpnt); | 2016 | ioc->name, SCpnt); |
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 533923f83f1a..73b0ca061bb5 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c | |||
@@ -317,7 +317,6 @@ int gru_proc_init(void) | |||
317 | { | 317 | { |
318 | struct proc_entry *p; | 318 | struct proc_entry *p; |
319 | 319 | ||
320 | proc_mkdir("sgi_uv", NULL); | ||
321 | proc_gru = proc_mkdir("sgi_uv/gru", NULL); | 320 | proc_gru = proc_mkdir("sgi_uv/gru", NULL); |
322 | 321 | ||
323 | for (p = proc_files; p->name; p++) | 322 | for (p = proc_files; p->name; p++) |
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index ed1722e50049..7b4cbd5e03e9 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h | |||
@@ -194,9 +194,10 @@ enum xp_retval { | |||
194 | xpGruSendMqError, /* 59: gru send message queue related error */ | 194 | xpGruSendMqError, /* 59: gru send message queue related error */ |
195 | 195 | ||
196 | xpBadChannelNumber, /* 60: invalid channel number */ | 196 | xpBadChannelNumber, /* 60: invalid channel number */ |
197 | xpBadMsgType, /* 60: invalid message type */ | 197 | xpBadMsgType, /* 61: invalid message type */ |
198 | xpBiosError, /* 62: BIOS error */ | ||
198 | 199 | ||
199 | xpUnknownReason /* 61: unknown reason - must be last in enum */ | 200 | xpUnknownReason /* 63: unknown reason - must be last in enum */ |
200 | }; | 201 | }; |
201 | 202 | ||
202 | /* | 203 | /* |
@@ -345,6 +346,8 @@ extern unsigned long (*xp_pa) (void *); | |||
345 | extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, | 346 | extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, |
346 | size_t); | 347 | size_t); |
347 | extern int (*xp_cpu_to_nasid) (int); | 348 | extern int (*xp_cpu_to_nasid) (int); |
349 | extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long); | ||
350 | extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long); | ||
348 | 351 | ||
349 | extern u64 xp_nofault_PIOR_target; | 352 | extern u64 xp_nofault_PIOR_target; |
350 | extern int xp_nofault_PIOR(void *); | 353 | extern int xp_nofault_PIOR(void *); |
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 66a1d19e08ad..9a2e77172d94 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c | |||
@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy); | |||
51 | int (*xp_cpu_to_nasid) (int cpuid); | 51 | int (*xp_cpu_to_nasid) (int cpuid); |
52 | EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); | 52 | EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); |
53 | 53 | ||
54 | enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr, | ||
55 | unsigned long size); | ||
56 | EXPORT_SYMBOL_GPL(xp_expand_memprotect); | ||
57 | enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr, | ||
58 | unsigned long size); | ||
59 | EXPORT_SYMBOL_GPL(xp_restrict_memprotect); | ||
60 | |||
54 | /* | 61 | /* |
55 | * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level | 62 | * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level |
56 | * users of XPC. | 63 | * users of XPC. |
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index 1440134caf31..fb3ec9d735a9 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c | |||
@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid) | |||
120 | return cpuid_to_nasid(cpuid); | 120 | return cpuid_to_nasid(cpuid); |
121 | } | 121 | } |
122 | 122 | ||
123 | static enum xp_retval | ||
124 | xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size) | ||
125 | { | ||
126 | u64 nasid_array = 0; | ||
127 | int ret; | ||
128 | |||
129 | ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1, | ||
130 | &nasid_array); | ||
131 | if (ret != 0) { | ||
132 | dev_err(xp, "sn_change_memprotect(,, " | ||
133 | "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret); | ||
134 | return xpSalError; | ||
135 | } | ||
136 | return xpSuccess; | ||
137 | } | ||
138 | |||
139 | static enum xp_retval | ||
140 | xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size) | ||
141 | { | ||
142 | u64 nasid_array = 0; | ||
143 | int ret; | ||
144 | |||
145 | ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0, | ||
146 | &nasid_array); | ||
147 | if (ret != 0) { | ||
148 | dev_err(xp, "sn_change_memprotect(,, " | ||
149 | "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret); | ||
150 | return xpSalError; | ||
151 | } | ||
152 | return xpSuccess; | ||
153 | } | ||
154 | |||
123 | enum xp_retval | 155 | enum xp_retval |
124 | xp_init_sn2(void) | 156 | xp_init_sn2(void) |
125 | { | 157 | { |
@@ -132,6 +164,8 @@ xp_init_sn2(void) | |||
132 | xp_pa = xp_pa_sn2; | 164 | xp_pa = xp_pa_sn2; |
133 | xp_remote_memcpy = xp_remote_memcpy_sn2; | 165 | xp_remote_memcpy = xp_remote_memcpy_sn2; |
134 | xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; | 166 | xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; |
167 | xp_expand_memprotect = xp_expand_memprotect_sn2; | ||
168 | xp_restrict_memprotect = xp_restrict_memprotect_sn2; | ||
135 | 169 | ||
136 | return xp_register_nofault_code_sn2(); | 170 | return xp_register_nofault_code_sn2(); |
137 | } | 171 | } |
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index d9f7ce2510bc..d238576b26fa 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c | |||
@@ -15,6 +15,11 @@ | |||
15 | 15 | ||
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <asm/uv/uv_hub.h> | 17 | #include <asm/uv/uv_hub.h> |
18 | #if defined CONFIG_X86_64 | ||
19 | #include <asm/uv/bios.h> | ||
20 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
21 | #include <asm/sn/sn_sal.h> | ||
22 | #endif | ||
18 | #include "../sgi-gru/grukservices.h" | 23 | #include "../sgi-gru/grukservices.h" |
19 | #include "xp.h" | 24 | #include "xp.h" |
20 | 25 | ||
@@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid) | |||
49 | return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); | 54 | return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); |
50 | } | 55 | } |
51 | 56 | ||
57 | static enum xp_retval | ||
58 | xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size) | ||
59 | { | ||
60 | int ret; | ||
61 | |||
62 | #if defined CONFIG_X86_64 | ||
63 | ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW); | ||
64 | if (ret != BIOS_STATUS_SUCCESS) { | ||
65 | dev_err(xp, "uv_bios_change_memprotect(,, " | ||
66 | "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret); | ||
67 | return xpBiosError; | ||
68 | } | ||
69 | |||
70 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
71 | u64 nasid_array; | ||
72 | |||
73 | ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1, | ||
74 | &nasid_array); | ||
75 | if (ret != 0) { | ||
76 | dev_err(xp, "sn_change_memprotect(,, " | ||
77 | "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret); | ||
78 | return xpSalError; | ||
79 | } | ||
80 | #else | ||
81 | #error not a supported configuration | ||
82 | #endif | ||
83 | return xpSuccess; | ||
84 | } | ||
85 | |||
86 | static enum xp_retval | ||
87 | xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size) | ||
88 | { | ||
89 | int ret; | ||
90 | |||
91 | #if defined CONFIG_X86_64 | ||
92 | ret = uv_bios_change_memprotect(phys_addr, size, | ||
93 | UV_MEMPROT_RESTRICT_ACCESS); | ||
94 | if (ret != BIOS_STATUS_SUCCESS) { | ||
95 | dev_err(xp, "uv_bios_change_memprotect(,, " | ||
96 | "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret); | ||
97 | return xpBiosError; | ||
98 | } | ||
99 | |||
100 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
101 | u64 nasid_array; | ||
102 | |||
103 | ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0, | ||
104 | &nasid_array); | ||
105 | if (ret != 0) { | ||
106 | dev_err(xp, "sn_change_memprotect(,, " | ||
107 | "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret); | ||
108 | return xpSalError; | ||
109 | } | ||
110 | #else | ||
111 | #error not a supported configuration | ||
112 | #endif | ||
113 | return xpSuccess; | ||
114 | } | ||
115 | |||
52 | enum xp_retval | 116 | enum xp_retval |
53 | xp_init_uv(void) | 117 | xp_init_uv(void) |
54 | { | 118 | { |
55 | BUG_ON(!is_uv()); | 119 | BUG_ON(!is_uv()); |
56 | 120 | ||
57 | xp_max_npartitions = XP_MAX_NPARTITIONS_UV; | 121 | xp_max_npartitions = XP_MAX_NPARTITIONS_UV; |
58 | xp_partition_id = 0; /* !!! not correct value */ | 122 | xp_partition_id = sn_partition_id; |
59 | xp_region_size = 0; /* !!! not correct value */ | 123 | xp_region_size = sn_region_size; |
60 | 124 | ||
61 | xp_pa = xp_pa_uv; | 125 | xp_pa = xp_pa_uv; |
62 | xp_remote_memcpy = xp_remote_memcpy_uv; | 126 | xp_remote_memcpy = xp_remote_memcpy_uv; |
63 | xp_cpu_to_nasid = xp_cpu_to_nasid_uv; | 127 | xp_cpu_to_nasid = xp_cpu_to_nasid_uv; |
128 | xp_expand_memprotect = xp_expand_memprotect_uv; | ||
129 | xp_restrict_memprotect = xp_restrict_memprotect_uv; | ||
64 | 130 | ||
65 | return xpSuccess; | 131 | return xpSuccess; |
66 | } | 132 | } |
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 619208d61862..a5bd658c2e83 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h | |||
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 { | |||
181 | xpc_nasid_mask_nlongs)) | 181 | xpc_nasid_mask_nlongs)) |
182 | 182 | ||
183 | /* | 183 | /* |
184 | * Info pertinent to a GRU message queue using a watch list for irq generation. | ||
185 | */ | ||
186 | struct xpc_gru_mq_uv { | ||
187 | void *address; /* address of GRU message queue */ | ||
188 | unsigned int order; /* size of GRU message queue as a power of 2 */ | ||
189 | int irq; /* irq raised when message is received in mq */ | ||
190 | int mmr_blade; /* blade where watchlist was allocated from */ | ||
191 | unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */ | ||
192 | int watchlist_num; /* number of watchlist allocatd by BIOS */ | ||
193 | }; | ||
194 | |||
195 | /* | ||
184 | * The activate_mq is used to send/receive GRU messages that affect XPC's | 196 | * The activate_mq is used to send/receive GRU messages that affect XPC's |
185 | * heartbeat, partition active state, and channel state. This is UV only. | 197 | * heartbeat, partition active state, and channel state. This is UV only. |
186 | */ | 198 | */ |
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index b4882ccf6344..73b7fb8de47a 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c | |||
@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES]; | |||
553 | static enum xp_retval | 553 | static enum xp_retval |
554 | xpc_allow_amo_ops_sn2(struct amo *amos_page) | 554 | xpc_allow_amo_ops_sn2(struct amo *amos_page) |
555 | { | 555 | { |
556 | u64 nasid_array = 0; | 556 | enum xp_retval ret = xpSuccess; |
557 | int ret; | ||
558 | 557 | ||
559 | /* | 558 | /* |
560 | * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST | 559 | * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST |
561 | * collides with memory operations. On those systems we call | 560 | * collides with memory operations. On those systems we call |
562 | * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead. | 561 | * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead. |
563 | */ | 562 | */ |
564 | if (!enable_shub_wars_1_1()) { | 563 | if (!enable_shub_wars_1_1()) |
565 | ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE, | 564 | ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE); |
566 | SN_MEMPROT_ACCESS_CLASS_1, | 565 | |
567 | &nasid_array); | 566 | return ret; |
568 | if (ret != 0) | ||
569 | return xpSalError; | ||
570 | } | ||
571 | return xpSuccess; | ||
572 | } | 567 | } |
573 | 568 | ||
574 | /* | 569 | /* |
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 1ac694c01623..91a55b1b1037 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
@@ -18,7 +18,15 @@ | |||
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/err.h> | ||
21 | #include <asm/uv/uv_hub.h> | 22 | #include <asm/uv/uv_hub.h> |
23 | #if defined CONFIG_X86_64 | ||
24 | #include <asm/uv/bios.h> | ||
25 | #include <asm/uv/uv_irq.h> | ||
26 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
27 | #include <asm/sn/intr.h> | ||
28 | #include <asm/sn/sn_sal.h> | ||
29 | #endif | ||
22 | #include "../sgi-gru/gru.h" | 30 | #include "../sgi-gru/gru.h" |
23 | #include "../sgi-gru/grukservices.h" | 31 | #include "../sgi-gru/grukservices.h" |
24 | #include "xpc.h" | 32 | #include "xpc.h" |
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv; | |||
27 | static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); | 35 | static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); |
28 | 36 | ||
29 | #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) | 37 | #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) |
30 | #define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) | 38 | #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ |
39 | XPC_ACTIVATE_MSG_SIZE_UV) | ||
40 | #define XPC_ACTIVATE_IRQ_NAME "xpc_activate" | ||
31 | 41 | ||
32 | #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ | 42 | #define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) |
33 | XPC_ACTIVATE_MSG_SIZE_UV) | 43 | #define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ |
34 | #define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ | 44 | XPC_NOTIFY_MSG_SIZE_UV) |
35 | XPC_NOTIFY_MSG_SIZE_UV) | 45 | #define XPC_NOTIFY_IRQ_NAME "xpc_notify" |
36 | 46 | ||
37 | static void *xpc_activate_mq_uv; | 47 | static struct xpc_gru_mq_uv *xpc_activate_mq_uv; |
38 | static void *xpc_notify_mq_uv; | 48 | static struct xpc_gru_mq_uv *xpc_notify_mq_uv; |
39 | 49 | ||
40 | static int | 50 | static int |
41 | xpc_setup_partitions_sn_uv(void) | 51 | xpc_setup_partitions_sn_uv(void) |
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void) | |||
52 | return 0; | 62 | return 0; |
53 | } | 63 | } |
54 | 64 | ||
55 | static void * | 65 | static int |
56 | xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, | 66 | xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) |
67 | { | ||
68 | #if defined CONFIG_X86_64 | ||
69 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); | ||
70 | if (mq->irq < 0) { | ||
71 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", | ||
72 | mq->irq); | ||
73 | } | ||
74 | |||
75 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
76 | int mmr_pnode; | ||
77 | unsigned long mmr_value; | ||
78 | |||
79 | if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) | ||
80 | mq->irq = SGI_XPC_ACTIVATE; | ||
81 | else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) | ||
82 | mq->irq = SGI_XPC_NOTIFY; | ||
83 | else | ||
84 | return -EINVAL; | ||
85 | |||
86 | mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | ||
87 | mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; | ||
88 | |||
89 | uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); | ||
90 | #else | ||
91 | #error not a supported configuration | ||
92 | #endif | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static void | ||
98 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) | ||
99 | { | ||
100 | #if defined CONFIG_X86_64 | ||
101 | uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); | ||
102 | |||
103 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
104 | int mmr_pnode; | ||
105 | unsigned long mmr_value; | ||
106 | |||
107 | mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | ||
108 | mmr_value = 1UL << 16; | ||
109 | |||
110 | uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); | ||
111 | #else | ||
112 | #error not a supported configuration | ||
113 | #endif | ||
114 | } | ||
115 | |||
116 | static int | ||
117 | xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) | ||
118 | { | ||
119 | int ret; | ||
120 | |||
121 | #if defined CONFIG_X86_64 | ||
122 | ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), | ||
123 | mq->order, &mq->mmr_offset); | ||
124 | if (ret < 0) { | ||
125 | dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " | ||
126 | "ret=%d\n", ret); | ||
127 | return ret; | ||
128 | } | ||
129 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
130 | ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), | ||
131 | mq->order, &mq->mmr_offset); | ||
132 | if (ret < 0) { | ||
133 | dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", | ||
134 | ret); | ||
135 | return -EBUSY; | ||
136 | } | ||
137 | #else | ||
138 | #error not a supported configuration | ||
139 | #endif | ||
140 | |||
141 | mq->watchlist_num = ret; | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static void | ||
146 | xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) | ||
147 | { | ||
148 | int ret; | ||
149 | |||
150 | #if defined CONFIG_X86_64 | ||
151 | ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); | ||
152 | BUG_ON(ret != BIOS_STATUS_SUCCESS); | ||
153 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
154 | ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); | ||
155 | BUG_ON(ret != SALRET_OK); | ||
156 | #else | ||
157 | #error not a supported configuration | ||
158 | #endif | ||
159 | } | ||
160 | |||
161 | static struct xpc_gru_mq_uv * | ||
162 | xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | ||
57 | irq_handler_t irq_handler) | 163 | irq_handler_t irq_handler) |
58 | { | 164 | { |
165 | enum xp_retval xp_ret; | ||
59 | int ret; | 166 | int ret; |
60 | int nid; | 167 | int nid; |
61 | int mq_order; | 168 | int pg_order; |
62 | struct page *page; | 169 | struct page *page; |
63 | void *mq; | 170 | struct xpc_gru_mq_uv *mq; |
171 | |||
172 | mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); | ||
173 | if (mq == NULL) { | ||
174 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " | ||
175 | "a xpc_gru_mq_uv structure\n"); | ||
176 | ret = -ENOMEM; | ||
177 | goto out_1; | ||
178 | } | ||
179 | |||
180 | pg_order = get_order(mq_size); | ||
181 | mq->order = pg_order + PAGE_SHIFT; | ||
182 | mq_size = 1UL << mq->order; | ||
183 | |||
184 | mq->mmr_blade = uv_cpu_to_blade_id(cpu); | ||
64 | 185 | ||
65 | nid = cpu_to_node(cpuid); | 186 | nid = cpu_to_node(cpu); |
66 | mq_order = get_order(mq_size); | ||
67 | page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 187 | page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, |
68 | mq_order); | 188 | pg_order); |
69 | if (page == NULL) { | 189 | if (page == NULL) { |
70 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " | 190 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " |
71 | "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); | 191 | "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); |
72 | return NULL; | 192 | ret = -ENOMEM; |
193 | goto out_2; | ||
73 | } | 194 | } |
195 | mq->address = page_address(page); | ||
74 | 196 | ||
75 | mq = page_address(page); | 197 | ret = gru_create_message_queue(mq->address, mq_size); |
76 | ret = gru_create_message_queue(mq, mq_size); | ||
77 | if (ret != 0) { | 198 | if (ret != 0) { |
78 | dev_err(xpc_part, "gru_create_message_queue() returned " | 199 | dev_err(xpc_part, "gru_create_message_queue() returned " |
79 | "error=%d\n", ret); | 200 | "error=%d\n", ret); |
80 | free_pages((unsigned long)mq, mq_order); | 201 | ret = -EINVAL; |
81 | return NULL; | 202 | goto out_3; |
82 | } | 203 | } |
83 | 204 | ||
84 | /* !!! Need to do some other things to set up IRQ */ | 205 | /* enable generation of irq when GRU mq operation occurs to this mq */ |
206 | ret = xpc_gru_mq_watchlist_alloc_uv(mq); | ||
207 | if (ret != 0) | ||
208 | goto out_3; | ||
85 | 209 | ||
86 | ret = request_irq(irq, irq_handler, 0, "xpc", NULL); | 210 | ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name); |
211 | if (ret != 0) | ||
212 | goto out_4; | ||
213 | |||
214 | ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); | ||
87 | if (ret != 0) { | 215 | if (ret != 0) { |
88 | dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", | 216 | dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", |
89 | irq, ret); | 217 | mq->irq, ret); |
90 | free_pages((unsigned long)mq, mq_order); | 218 | goto out_5; |
91 | return NULL; | ||
92 | } | 219 | } |
93 | 220 | ||
94 | /* !!! enable generation of irq when GRU mq op occurs to this mq */ | 221 | /* allow other partitions to access this GRU mq */ |
95 | 222 | xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); | |
96 | /* ??? allow other partitions to access GRU mq? */ | 223 | if (xp_ret != xpSuccess) { |
224 | ret = -EACCES; | ||
225 | goto out_6; | ||
226 | } | ||
97 | 227 | ||
98 | return mq; | 228 | return mq; |
229 | |||
230 | /* something went wrong */ | ||
231 | out_6: | ||
232 | free_irq(mq->irq, NULL); | ||
233 | out_5: | ||
234 | xpc_release_gru_mq_irq_uv(mq); | ||
235 | out_4: | ||
236 | xpc_gru_mq_watchlist_free_uv(mq); | ||
237 | out_3: | ||
238 | free_pages((unsigned long)mq->address, pg_order); | ||
239 | out_2: | ||
240 | kfree(mq); | ||
241 | out_1: | ||
242 | return ERR_PTR(ret); | ||
99 | } | 243 | } |
100 | 244 | ||
101 | static void | 245 | static void |
102 | xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) | 246 | xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) |
103 | { | 247 | { |
104 | /* ??? disallow other partitions to access GRU mq? */ | 248 | unsigned int mq_size; |
249 | int pg_order; | ||
250 | int ret; | ||
251 | |||
252 | /* disallow other partitions to access GRU mq */ | ||
253 | mq_size = 1UL << mq->order; | ||
254 | ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); | ||
255 | BUG_ON(ret != xpSuccess); | ||
105 | 256 | ||
106 | /* !!! disable generation of irq when GRU mq op occurs to this mq */ | 257 | /* unregister irq handler and release mq irq/vector mapping */ |
258 | free_irq(mq->irq, NULL); | ||
259 | xpc_release_gru_mq_irq_uv(mq); | ||
107 | 260 | ||
108 | free_irq(irq, NULL); | 261 | /* disable generation of irq when GRU mq op occurs to this mq */ |
262 | xpc_gru_mq_watchlist_free_uv(mq); | ||
109 | 263 | ||
110 | free_pages((unsigned long)mq, get_order(mq_size)); | 264 | pg_order = mq->order - PAGE_SHIFT; |
265 | free_pages((unsigned long)mq->address, pg_order); | ||
266 | |||
267 | kfree(mq); | ||
111 | } | 268 | } |
112 | 269 | ||
113 | static enum xp_retval | 270 | static enum xp_retval |
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) | |||
402 | struct xpc_partition *part; | 559 | struct xpc_partition *part; |
403 | int wakeup_hb_checker = 0; | 560 | int wakeup_hb_checker = 0; |
404 | 561 | ||
405 | while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { | 562 | while (1) { |
563 | msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address); | ||
564 | if (msg_hdr == NULL) | ||
565 | break; | ||
406 | 566 | ||
407 | partid = msg_hdr->partid; | 567 | partid = msg_hdr->partid; |
408 | if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { | 568 | if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { |
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) | |||
418 | } | 578 | } |
419 | } | 579 | } |
420 | 580 | ||
421 | gru_free_message(xpc_activate_mq_uv, msg_hdr); | 581 | gru_free_message(xpc_activate_mq_uv->address, msg_hdr); |
422 | } | 582 | } |
423 | 583 | ||
424 | if (wakeup_hb_checker) | 584 | if (wakeup_hb_checker) |
@@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) | |||
482 | struct xpc_partition_uv *part_uv = &part->sn.uv; | 642 | struct xpc_partition_uv *part_uv = &part->sn.uv; |
483 | 643 | ||
484 | /* | 644 | /* |
485 | * !!! Make our side think that the remote parition sent an activate | 645 | * !!! Make our side think that the remote partition sent an activate |
486 | * !!! message our way by doing what the activate IRQ handler would | 646 | * !!! message our way by doing what the activate IRQ handler would |
487 | * !!! do had one really been sent. | 647 | * !!! do had one really been sent. |
488 | */ | 648 | */ |
@@ -500,14 +660,39 @@ static enum xp_retval | |||
500 | xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, | 660 | xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, |
501 | size_t *len) | 661 | size_t *len) |
502 | { | 662 | { |
503 | /* !!! call the UV version of sn_partition_reserved_page_pa() */ | 663 | s64 status; |
504 | return xpUnsupported; | 664 | enum xp_retval ret; |
665 | |||
666 | #if defined CONFIG_X86_64 | ||
667 | status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa, | ||
668 | (u64 *)len); | ||
669 | if (status == BIOS_STATUS_SUCCESS) | ||
670 | ret = xpSuccess; | ||
671 | else if (status == BIOS_STATUS_MORE_PASSES) | ||
672 | ret = xpNeedMoreInfo; | ||
673 | else | ||
674 | ret = xpBiosError; | ||
675 | |||
676 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
677 | status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); | ||
678 | if (status == SALRET_OK) | ||
679 | ret = xpSuccess; | ||
680 | else if (status == SALRET_MORE_PASSES) | ||
681 | ret = xpNeedMoreInfo; | ||
682 | else | ||
683 | ret = xpSalError; | ||
684 | |||
685 | #else | ||
686 | #error not a supported configuration | ||
687 | #endif | ||
688 | |||
689 | return ret; | ||
505 | } | 690 | } |
506 | 691 | ||
507 | static int | 692 | static int |
508 | xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) | 693 | xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) |
509 | { | 694 | { |
510 | rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv); | 695 | rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address); |
511 | return 0; | 696 | return 0; |
512 | } | 697 | } |
513 | 698 | ||
@@ -1411,22 +1596,18 @@ xpc_init_uv(void) | |||
1411 | return -E2BIG; | 1596 | return -E2BIG; |
1412 | } | 1597 | } |
1413 | 1598 | ||
1414 | /* ??? The cpuid argument's value is 0, is that what we want? */ | 1599 | xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, |
1415 | /* !!! The irq argument's value isn't correct. */ | 1600 | XPC_ACTIVATE_IRQ_NAME, |
1416 | xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0, | ||
1417 | xpc_handle_activate_IRQ_uv); | 1601 | xpc_handle_activate_IRQ_uv); |
1418 | if (xpc_activate_mq_uv == NULL) | 1602 | if (IS_ERR(xpc_activate_mq_uv)) |
1419 | return -ENOMEM; | 1603 | return PTR_ERR(xpc_activate_mq_uv); |
1420 | 1604 | ||
1421 | /* ??? The cpuid argument's value is 0, is that what we want? */ | 1605 | xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, |
1422 | /* !!! The irq argument's value isn't correct. */ | 1606 | XPC_NOTIFY_IRQ_NAME, |
1423 | xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0, | ||
1424 | xpc_handle_notify_IRQ_uv); | 1607 | xpc_handle_notify_IRQ_uv); |
1425 | if (xpc_notify_mq_uv == NULL) { | 1608 | if (IS_ERR(xpc_notify_mq_uv)) { |
1426 | /* !!! The irq argument's value isn't correct. */ | 1609 | xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); |
1427 | xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, | 1610 | return PTR_ERR(xpc_notify_mq_uv); |
1428 | XPC_ACTIVATE_MQ_SIZE_UV, 0); | ||
1429 | return -ENOMEM; | ||
1430 | } | 1611 | } |
1431 | 1612 | ||
1432 | return 0; | 1613 | return 0; |
@@ -1435,9 +1616,6 @@ xpc_init_uv(void) | |||
1435 | void | 1616 | void |
1436 | xpc_exit_uv(void) | 1617 | xpc_exit_uv(void) |
1437 | { | 1618 | { |
1438 | /* !!! The irq argument's value isn't correct. */ | 1619 | xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); |
1439 | xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0); | 1620 | xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); |
1440 | |||
1441 | /* !!! The irq argument's value isn't correct. */ | ||
1442 | xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0); | ||
1443 | } | 1621 | } |
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index b7ad2829d67e..ac57b6a42c6e 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c | |||
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mlx4_cq_resize); | |||
189 | 189 | ||
190 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | 190 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, |
191 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, | 191 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, |
192 | int collapsed) | 192 | unsigned vector, int collapsed) |
193 | { | 193 | { |
194 | struct mlx4_priv *priv = mlx4_priv(dev); | 194 | struct mlx4_priv *priv = mlx4_priv(dev); |
195 | struct mlx4_cq_table *cq_table = &priv->cq_table; | 195 | struct mlx4_cq_table *cq_table = &priv->cq_table; |
@@ -198,6 +198,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
198 | u64 mtt_addr; | 198 | u64 mtt_addr; |
199 | int err; | 199 | int err; |
200 | 200 | ||
201 | if (vector >= dev->caps.num_comp_vectors) | ||
202 | return -EINVAL; | ||
203 | |||
204 | cq->vector = vector; | ||
205 | |||
201 | cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); | 206 | cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); |
202 | if (cq->cqn == -1) | 207 | if (cq->cqn == -1) |
203 | return -ENOMEM; | 208 | return -ENOMEM; |
@@ -227,7 +232,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
227 | 232 | ||
228 | cq_context->flags = cpu_to_be32(!!collapsed << 18); | 233 | cq_context->flags = cpu_to_be32(!!collapsed << 18); |
229 | cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); | 234 | cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); |
230 | cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn; | 235 | cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; |
231 | cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; | 236 | cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; |
232 | 237 | ||
233 | mtt_addr = mlx4_mtt_addr(dev, mtt); | 238 | mtt_addr = mlx4_mtt_addr(dev, mtt); |
@@ -276,7 +281,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) | |||
276 | if (err) | 281 | if (err) |
277 | mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); | 282 | mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); |
278 | 283 | ||
279 | synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq); | 284 | synchronize_irq(priv->eq_table.eq[cq->vector].irq); |
280 | 285 | ||
281 | spin_lock_irq(&cq_table->lock); | 286 | spin_lock_irq(&cq_table->lock); |
282 | radix_tree_delete(&cq_table->tree, cq->cqn); | 287 | radix_tree_delete(&cq_table->tree, cq->cqn); |
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c index 23d54a0e681e..91f50de84be9 100644 --- a/drivers/net/mlx4/en_cq.c +++ b/drivers/net/mlx4/en_cq.c | |||
@@ -51,10 +51,13 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, | |||
51 | int err; | 51 | int err; |
52 | 52 | ||
53 | cq->size = entries; | 53 | cq->size = entries; |
54 | if (mode == RX) | 54 | if (mode == RX) { |
55 | cq->buf_size = cq->size * sizeof(struct mlx4_cqe); | 55 | cq->buf_size = cq->size * sizeof(struct mlx4_cqe); |
56 | else | 56 | cq->vector = ring % mdev->dev->caps.num_comp_vectors; |
57 | } else { | ||
57 | cq->buf_size = sizeof(struct mlx4_cqe); | 58 | cq->buf_size = sizeof(struct mlx4_cqe); |
59 | cq->vector = 0; | ||
60 | } | ||
58 | 61 | ||
59 | cq->ring = ring; | 62 | cq->ring = ring; |
60 | cq->is_tx = mode; | 63 | cq->is_tx = mode; |
@@ -87,7 +90,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | |||
87 | memset(cq->buf, 0, cq->buf_size); | 90 | memset(cq->buf, 0, cq->buf_size); |
88 | 91 | ||
89 | err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, | 92 | err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, |
90 | cq->wqres.db.dma, &cq->mcq, cq->is_tx); | 93 | cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx); |
91 | if (err) | 94 | if (err) |
92 | return err; | 95 | return err; |
93 | 96 | ||
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index 4b9794e97a79..c1c05852a95e 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c | |||
@@ -170,9 +170,9 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
170 | mlx4_info(mdev, "Using %d tx rings for port:%d\n", | 170 | mlx4_info(mdev, "Using %d tx rings for port:%d\n", |
171 | mdev->profile.prof[i].tx_ring_num, i); | 171 | mdev->profile.prof[i].tx_ring_num, i); |
172 | if (!mdev->profile.prof[i].rx_ring_num) { | 172 | if (!mdev->profile.prof[i].rx_ring_num) { |
173 | mdev->profile.prof[i].rx_ring_num = 1; | 173 | mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors; |
174 | mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", | 174 | mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", |
175 | 1, i); | 175 | mdev->profile.prof[i].rx_ring_num, i); |
176 | } else | 176 | } else |
177 | mlx4_info(mdev, "Using %d rx rings for port:%d\n", | 177 | mlx4_info(mdev, "Using %d rx rings for port:%d\n", |
178 | mdev->profile.prof[i].rx_ring_num, i); | 178 | mdev->profile.prof[i].rx_ring_num, i); |
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index de169338cd90..2c19bff7cbab 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -243,10 +243,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) | |||
243 | * least that often. | 243 | * least that often. |
244 | */ | 244 | */ |
245 | if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { | 245 | if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { |
246 | /* | ||
247 | * Conditional on hca_type is OK here because | ||
248 | * this is a rare case, not the fast path. | ||
249 | */ | ||
250 | eq_set_ci(eq, 0); | 246 | eq_set_ci(eq, 0); |
251 | set_ci = 0; | 247 | set_ci = 0; |
252 | } | 248 | } |
@@ -266,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) | |||
266 | 262 | ||
267 | writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); | 263 | writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); |
268 | 264 | ||
269 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 265 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
270 | work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); | 266 | work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); |
271 | 267 | ||
272 | return IRQ_RETVAL(work); | 268 | return IRQ_RETVAL(work); |
@@ -304,6 +300,17 @@ static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | |||
304 | MLX4_CMD_TIME_CLASS_A); | 300 | MLX4_CMD_TIME_CLASS_A); |
305 | } | 301 | } |
306 | 302 | ||
303 | static int mlx4_num_eq_uar(struct mlx4_dev *dev) | ||
304 | { | ||
305 | /* | ||
306 | * Each UAR holds 4 EQ doorbells. To figure out how many UARs | ||
307 | * we need to map, take the difference of highest index and | ||
308 | * the lowest index we'll use and add 1. | ||
309 | */ | ||
310 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - | ||
311 | dev->caps.reserved_eqs / 4 + 1; | ||
312 | } | ||
313 | |||
307 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) | 314 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) |
308 | { | 315 | { |
309 | struct mlx4_priv *priv = mlx4_priv(dev); | 316 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -483,9 +490,11 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) | |||
483 | 490 | ||
484 | if (eq_table->have_irq) | 491 | if (eq_table->have_irq) |
485 | free_irq(dev->pdev->irq, dev); | 492 | free_irq(dev->pdev->irq, dev); |
486 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 493 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
487 | if (eq_table->eq[i].have_irq) | 494 | if (eq_table->eq[i].have_irq) |
488 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); | 495 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); |
496 | |||
497 | kfree(eq_table->irq_names); | ||
489 | } | 498 | } |
490 | 499 | ||
491 | static int mlx4_map_clr_int(struct mlx4_dev *dev) | 500 | static int mlx4_map_clr_int(struct mlx4_dev *dev) |
@@ -551,57 +560,93 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev) | |||
551 | __free_page(priv->eq_table.icm_page); | 560 | __free_page(priv->eq_table.icm_page); |
552 | } | 561 | } |
553 | 562 | ||
563 | int mlx4_alloc_eq_table(struct mlx4_dev *dev) | ||
564 | { | ||
565 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
566 | |||
567 | priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, | ||
568 | sizeof *priv->eq_table.eq, GFP_KERNEL); | ||
569 | if (!priv->eq_table.eq) | ||
570 | return -ENOMEM; | ||
571 | |||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | void mlx4_free_eq_table(struct mlx4_dev *dev) | ||
576 | { | ||
577 | kfree(mlx4_priv(dev)->eq_table.eq); | ||
578 | } | ||
579 | |||
554 | int mlx4_init_eq_table(struct mlx4_dev *dev) | 580 | int mlx4_init_eq_table(struct mlx4_dev *dev) |
555 | { | 581 | { |
556 | struct mlx4_priv *priv = mlx4_priv(dev); | 582 | struct mlx4_priv *priv = mlx4_priv(dev); |
557 | int err; | 583 | int err; |
558 | int i; | 584 | int i; |
559 | 585 | ||
586 | priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map, | ||
587 | mlx4_num_eq_uar(dev), GFP_KERNEL); | ||
588 | if (!priv->eq_table.uar_map) { | ||
589 | err = -ENOMEM; | ||
590 | goto err_out_free; | ||
591 | } | ||
592 | |||
560 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, | 593 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, |
561 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); | 594 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); |
562 | if (err) | 595 | if (err) |
563 | return err; | 596 | goto err_out_free; |
564 | 597 | ||
565 | for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i) | 598 | for (i = 0; i < mlx4_num_eq_uar(dev); ++i) |
566 | priv->eq_table.uar_map[i] = NULL; | 599 | priv->eq_table.uar_map[i] = NULL; |
567 | 600 | ||
568 | err = mlx4_map_clr_int(dev); | 601 | err = mlx4_map_clr_int(dev); |
569 | if (err) | 602 | if (err) |
570 | goto err_out_free; | 603 | goto err_out_bitmap; |
571 | 604 | ||
572 | priv->eq_table.clr_mask = | 605 | priv->eq_table.clr_mask = |
573 | swab32(1 << (priv->eq_table.inta_pin & 31)); | 606 | swab32(1 << (priv->eq_table.inta_pin & 31)); |
574 | priv->eq_table.clr_int = priv->clr_base + | 607 | priv->eq_table.clr_int = priv->clr_base + |
575 | (priv->eq_table.inta_pin < 32 ? 4 : 0); | 608 | (priv->eq_table.inta_pin < 32 ? 4 : 0); |
576 | 609 | ||
577 | err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, | 610 | priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL); |
578 | (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0, | 611 | if (!priv->eq_table.irq_names) { |
579 | &priv->eq_table.eq[MLX4_EQ_COMP]); | 612 | err = -ENOMEM; |
580 | if (err) | 613 | goto err_out_bitmap; |
581 | goto err_out_unmap; | 614 | } |
615 | |||
616 | for (i = 0; i < dev->caps.num_comp_vectors; ++i) { | ||
617 | err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, | ||
618 | (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, | ||
619 | &priv->eq_table.eq[i]); | ||
620 | if (err) | ||
621 | goto err_out_unmap; | ||
622 | } | ||
582 | 623 | ||
583 | err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, | 624 | err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, |
584 | (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0, | 625 | (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, |
585 | &priv->eq_table.eq[MLX4_EQ_ASYNC]); | 626 | &priv->eq_table.eq[dev->caps.num_comp_vectors]); |
586 | if (err) | 627 | if (err) |
587 | goto err_out_comp; | 628 | goto err_out_comp; |
588 | 629 | ||
589 | if (dev->flags & MLX4_FLAG_MSI_X) { | 630 | if (dev->flags & MLX4_FLAG_MSI_X) { |
590 | static const char *eq_name[] = { | 631 | static const char async_eq_name[] = "mlx4-async"; |
591 | [MLX4_EQ_COMP] = DRV_NAME " (comp)", | 632 | const char *eq_name; |
592 | [MLX4_EQ_ASYNC] = DRV_NAME " (async)" | 633 | |
593 | }; | 634 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { |
635 | if (i < dev->caps.num_comp_vectors) { | ||
636 | snprintf(priv->eq_table.irq_names + i * 16, 16, | ||
637 | "mlx4-comp-%d", i); | ||
638 | eq_name = priv->eq_table.irq_names + i * 16; | ||
639 | } else | ||
640 | eq_name = async_eq_name; | ||
594 | 641 | ||
595 | for (i = 0; i < MLX4_NUM_EQ; ++i) { | ||
596 | err = request_irq(priv->eq_table.eq[i].irq, | 642 | err = request_irq(priv->eq_table.eq[i].irq, |
597 | mlx4_msi_x_interrupt, | 643 | mlx4_msi_x_interrupt, 0, eq_name, |
598 | 0, eq_name[i], priv->eq_table.eq + i); | 644 | priv->eq_table.eq + i); |
599 | if (err) | 645 | if (err) |
600 | goto err_out_async; | 646 | goto err_out_async; |
601 | 647 | ||
602 | priv->eq_table.eq[i].have_irq = 1; | 648 | priv->eq_table.eq[i].have_irq = 1; |
603 | } | 649 | } |
604 | |||
605 | } else { | 650 | } else { |
606 | err = request_irq(dev->pdev->irq, mlx4_interrupt, | 651 | err = request_irq(dev->pdev->irq, mlx4_interrupt, |
607 | IRQF_SHARED, DRV_NAME, dev); | 652 | IRQF_SHARED, DRV_NAME, dev); |
@@ -612,28 +657,36 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
612 | } | 657 | } |
613 | 658 | ||
614 | err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | 659 | err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, |
615 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); | 660 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); |
616 | if (err) | 661 | if (err) |
617 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", | 662 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", |
618 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); | 663 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); |
619 | 664 | ||
620 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 665 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
621 | eq_set_ci(&priv->eq_table.eq[i], 1); | 666 | eq_set_ci(&priv->eq_table.eq[i], 1); |
622 | 667 | ||
623 | return 0; | 668 | return 0; |
624 | 669 | ||
625 | err_out_async: | 670 | err_out_async: |
626 | mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); | 671 | mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); |
627 | 672 | ||
628 | err_out_comp: | 673 | err_out_comp: |
629 | mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]); | 674 | i = dev->caps.num_comp_vectors - 1; |
630 | 675 | ||
631 | err_out_unmap: | 676 | err_out_unmap: |
677 | while (i >= 0) { | ||
678 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | ||
679 | --i; | ||
680 | } | ||
632 | mlx4_unmap_clr_int(dev); | 681 | mlx4_unmap_clr_int(dev); |
633 | mlx4_free_irqs(dev); | 682 | mlx4_free_irqs(dev); |
634 | 683 | ||
635 | err_out_free: | 684 | err_out_bitmap: |
636 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); | 685 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); |
686 | |||
687 | err_out_free: | ||
688 | kfree(priv->eq_table.uar_map); | ||
689 | |||
637 | return err; | 690 | return err; |
638 | } | 691 | } |
639 | 692 | ||
@@ -643,18 +696,20 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |||
643 | int i; | 696 | int i; |
644 | 697 | ||
645 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, | 698 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, |
646 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); | 699 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); |
647 | 700 | ||
648 | mlx4_free_irqs(dev); | 701 | mlx4_free_irqs(dev); |
649 | 702 | ||
650 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 703 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
651 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | 704 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); |
652 | 705 | ||
653 | mlx4_unmap_clr_int(dev); | 706 | mlx4_unmap_clr_int(dev); |
654 | 707 | ||
655 | for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i) | 708 | for (i = 0; i < mlx4_num_eq_uar(dev); ++i) |
656 | if (priv->eq_table.uar_map[i]) | 709 | if (priv->eq_table.uar_map[i]) |
657 | iounmap(priv->eq_table.uar_map[i]); | 710 | iounmap(priv->eq_table.uar_map[i]); |
658 | 711 | ||
659 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); | 712 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); |
713 | |||
714 | kfree(priv->eq_table.uar_map); | ||
660 | } | 715 | } |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 90a0281d15ea..710c79e7a2db 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -421,9 +421,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
421 | ((u64) (MLX4_CMPT_TYPE_EQ * | 421 | ((u64) (MLX4_CMPT_TYPE_EQ * |
422 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | 422 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), |
423 | cmpt_entry_sz, | 423 | cmpt_entry_sz, |
424 | roundup_pow_of_two(MLX4_NUM_EQ + | 424 | dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); |
425 | dev->caps.reserved_eqs), | ||
426 | MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0); | ||
427 | if (err) | 425 | if (err) |
428 | goto err_cq; | 426 | goto err_cq; |
429 | 427 | ||
@@ -810,12 +808,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
810 | if (dev->flags & MLX4_FLAG_MSI_X) { | 808 | if (dev->flags & MLX4_FLAG_MSI_X) { |
811 | mlx4_warn(dev, "NOP command failed to generate MSI-X " | 809 | mlx4_warn(dev, "NOP command failed to generate MSI-X " |
812 | "interrupt IRQ %d).\n", | 810 | "interrupt IRQ %d).\n", |
813 | priv->eq_table.eq[MLX4_EQ_ASYNC].irq); | 811 | priv->eq_table.eq[dev->caps.num_comp_vectors].irq); |
814 | mlx4_warn(dev, "Trying again without MSI-X.\n"); | 812 | mlx4_warn(dev, "Trying again without MSI-X.\n"); |
815 | } else { | 813 | } else { |
816 | mlx4_err(dev, "NOP command failed to generate interrupt " | 814 | mlx4_err(dev, "NOP command failed to generate interrupt " |
817 | "(IRQ %d), aborting.\n", | 815 | "(IRQ %d), aborting.\n", |
818 | priv->eq_table.eq[MLX4_EQ_ASYNC].irq); | 816 | priv->eq_table.eq[dev->caps.num_comp_vectors].irq); |
819 | mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); | 817 | mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); |
820 | } | 818 | } |
821 | 819 | ||
@@ -908,31 +906,50 @@ err_uar_table_free: | |||
908 | static void mlx4_enable_msi_x(struct mlx4_dev *dev) | 906 | static void mlx4_enable_msi_x(struct mlx4_dev *dev) |
909 | { | 907 | { |
910 | struct mlx4_priv *priv = mlx4_priv(dev); | 908 | struct mlx4_priv *priv = mlx4_priv(dev); |
911 | struct msix_entry entries[MLX4_NUM_EQ]; | 909 | struct msix_entry *entries; |
910 | int nreq; | ||
912 | int err; | 911 | int err; |
913 | int i; | 912 | int i; |
914 | 913 | ||
915 | if (msi_x) { | 914 | if (msi_x) { |
916 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 915 | nreq = min(dev->caps.num_eqs - dev->caps.reserved_eqs, |
916 | num_possible_cpus() + 1); | ||
917 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); | ||
918 | if (!entries) | ||
919 | goto no_msi; | ||
920 | |||
921 | for (i = 0; i < nreq; ++i) | ||
917 | entries[i].entry = i; | 922 | entries[i].entry = i; |
918 | 923 | ||
919 | err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries)); | 924 | retry: |
925 | err = pci_enable_msix(dev->pdev, entries, nreq); | ||
920 | if (err) { | 926 | if (err) { |
921 | if (err > 0) | 927 | /* Try again if at least 2 vectors are available */ |
922 | mlx4_info(dev, "Only %d MSI-X vectors available, " | 928 | if (err > 1) { |
923 | "not using MSI-X\n", err); | 929 | mlx4_info(dev, "Requested %d vectors, " |
930 | "but only %d MSI-X vectors available, " | ||
931 | "trying again\n", nreq, err); | ||
932 | nreq = err; | ||
933 | goto retry; | ||
934 | } | ||
935 | |||
924 | goto no_msi; | 936 | goto no_msi; |
925 | } | 937 | } |
926 | 938 | ||
927 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 939 | dev->caps.num_comp_vectors = nreq - 1; |
940 | for (i = 0; i < nreq; ++i) | ||
928 | priv->eq_table.eq[i].irq = entries[i].vector; | 941 | priv->eq_table.eq[i].irq = entries[i].vector; |
929 | 942 | ||
930 | dev->flags |= MLX4_FLAG_MSI_X; | 943 | dev->flags |= MLX4_FLAG_MSI_X; |
944 | |||
945 | kfree(entries); | ||
931 | return; | 946 | return; |
932 | } | 947 | } |
933 | 948 | ||
934 | no_msi: | 949 | no_msi: |
935 | for (i = 0; i < MLX4_NUM_EQ; ++i) | 950 | dev->caps.num_comp_vectors = 1; |
951 | |||
952 | for (i = 0; i < 2; ++i) | ||
936 | priv->eq_table.eq[i].irq = dev->pdev->irq; | 953 | priv->eq_table.eq[i].irq = dev->pdev->irq; |
937 | } | 954 | } |
938 | 955 | ||
@@ -1074,6 +1091,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1074 | if (err) | 1091 | if (err) |
1075 | goto err_cmd; | 1092 | goto err_cmd; |
1076 | 1093 | ||
1094 | err = mlx4_alloc_eq_table(dev); | ||
1095 | if (err) | ||
1096 | goto err_close; | ||
1097 | |||
1077 | mlx4_enable_msi_x(dev); | 1098 | mlx4_enable_msi_x(dev); |
1078 | 1099 | ||
1079 | err = mlx4_setup_hca(dev); | 1100 | err = mlx4_setup_hca(dev); |
@@ -1084,7 +1105,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1084 | } | 1105 | } |
1085 | 1106 | ||
1086 | if (err) | 1107 | if (err) |
1087 | goto err_close; | 1108 | goto err_free_eq; |
1088 | 1109 | ||
1089 | for (port = 1; port <= dev->caps.num_ports; port++) { | 1110 | for (port = 1; port <= dev->caps.num_ports; port++) { |
1090 | err = mlx4_init_port_info(dev, port); | 1111 | err = mlx4_init_port_info(dev, port); |
@@ -1114,6 +1135,9 @@ err_port: | |||
1114 | mlx4_cleanup_pd_table(dev); | 1135 | mlx4_cleanup_pd_table(dev); |
1115 | mlx4_cleanup_uar_table(dev); | 1136 | mlx4_cleanup_uar_table(dev); |
1116 | 1137 | ||
1138 | err_free_eq: | ||
1139 | mlx4_free_eq_table(dev); | ||
1140 | |||
1117 | err_close: | 1141 | err_close: |
1118 | if (dev->flags & MLX4_FLAG_MSI_X) | 1142 | if (dev->flags & MLX4_FLAG_MSI_X) |
1119 | pci_disable_msix(pdev); | 1143 | pci_disable_msix(pdev); |
@@ -1177,6 +1201,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
1177 | iounmap(priv->kar); | 1201 | iounmap(priv->kar); |
1178 | mlx4_uar_free(dev, &priv->driver_uar); | 1202 | mlx4_uar_free(dev, &priv->driver_uar); |
1179 | mlx4_cleanup_uar_table(dev); | 1203 | mlx4_cleanup_uar_table(dev); |
1204 | mlx4_free_eq_table(dev); | ||
1180 | mlx4_close_hca(dev); | 1205 | mlx4_close_hca(dev); |
1181 | mlx4_cmd_cleanup(dev); | 1206 | mlx4_cmd_cleanup(dev); |
1182 | 1207 | ||
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 34c909deaff3..e0213bad61c7 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -63,12 +63,6 @@ enum { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | enum { | 65 | enum { |
66 | MLX4_EQ_ASYNC, | ||
67 | MLX4_EQ_COMP, | ||
68 | MLX4_NUM_EQ | ||
69 | }; | ||
70 | |||
71 | enum { | ||
72 | MLX4_NUM_PDS = 1 << 15 | 66 | MLX4_NUM_PDS = 1 << 15 |
73 | }; | 67 | }; |
74 | 68 | ||
@@ -205,10 +199,11 @@ struct mlx4_cq_table { | |||
205 | 199 | ||
206 | struct mlx4_eq_table { | 200 | struct mlx4_eq_table { |
207 | struct mlx4_bitmap bitmap; | 201 | struct mlx4_bitmap bitmap; |
202 | char *irq_names; | ||
208 | void __iomem *clr_int; | 203 | void __iomem *clr_int; |
209 | void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4]; | 204 | void __iomem **uar_map; |
210 | u32 clr_mask; | 205 | u32 clr_mask; |
211 | struct mlx4_eq eq[MLX4_NUM_EQ]; | 206 | struct mlx4_eq *eq; |
212 | u64 icm_virt; | 207 | u64 icm_virt; |
213 | struct page *icm_page; | 208 | struct page *icm_page; |
214 | dma_addr_t icm_dma; | 209 | dma_addr_t icm_dma; |
@@ -328,6 +323,9 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); | |||
328 | 323 | ||
329 | int mlx4_reset(struct mlx4_dev *dev); | 324 | int mlx4_reset(struct mlx4_dev *dev); |
330 | 325 | ||
326 | int mlx4_alloc_eq_table(struct mlx4_dev *dev); | ||
327 | void mlx4_free_eq_table(struct mlx4_dev *dev); | ||
328 | |||
331 | int mlx4_init_pd_table(struct mlx4_dev *dev); | 329 | int mlx4_init_pd_table(struct mlx4_dev *dev); |
332 | int mlx4_init_uar_table(struct mlx4_dev *dev); | 330 | int mlx4_init_uar_table(struct mlx4_dev *dev); |
333 | int mlx4_init_mr_table(struct mlx4_dev *dev); | 331 | int mlx4_init_mr_table(struct mlx4_dev *dev); |
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index 9ca42b213d54..919fb9eb1b62 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c | |||
@@ -107,7 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
107 | profile[MLX4_RES_AUXC].num = request->num_qp; | 107 | profile[MLX4_RES_AUXC].num = request->num_qp; |
108 | profile[MLX4_RES_SRQ].num = request->num_srq; | 108 | profile[MLX4_RES_SRQ].num = request->num_srq; |
109 | profile[MLX4_RES_CQ].num = request->num_cq; | 109 | profile[MLX4_RES_CQ].num = request->num_cq; |
110 | profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs; | 110 | profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs, |
111 | dev_cap->reserved_eqs + | ||
112 | num_possible_cpus() + 1); | ||
111 | profile[MLX4_RES_DMPT].num = request->num_mpt; | 113 | profile[MLX4_RES_DMPT].num = request->num_mpt; |
112 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; | 114 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; |
113 | profile[MLX4_RES_MTT].num = request->num_mtt; | 115 | profile[MLX4_RES_MTT].num = request->num_mtt; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fd0b11ea5562..666c1d98cdaf 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -715,6 +715,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
715 | struct tun_net *tn; | 715 | struct tun_net *tn; |
716 | struct tun_struct *tun; | 716 | struct tun_struct *tun; |
717 | struct net_device *dev; | 717 | struct net_device *dev; |
718 | const struct cred *cred = current_cred(); | ||
718 | int err; | 719 | int err; |
719 | 720 | ||
720 | tn = net_generic(net, tun_net_id); | 721 | tn = net_generic(net, tun_net_id); |
@@ -725,11 +726,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
725 | 726 | ||
726 | /* Check permissions */ | 727 | /* Check permissions */ |
727 | if (((tun->owner != -1 && | 728 | if (((tun->owner != -1 && |
728 | current->euid != tun->owner) || | 729 | cred->euid != tun->owner) || |
729 | (tun->group != -1 && | 730 | (tun->group != -1 && |
730 | current->egid != tun->group)) && | 731 | cred->egid != tun->group)) && |
731 | !capable(CAP_NET_ADMIN)) | 732 | !capable(CAP_NET_ADMIN)) { |
732 | return -EPERM; | 733 | return -EPERM; |
734 | } | ||
733 | } | 735 | } |
734 | else if (__dev_get_by_name(net, ifr->ifr_name)) | 736 | else if (__dev_get_by_name(net, ifr->ifr_name)) |
735 | return -EINVAL; | 737 | return -EINVAL; |
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index f9e244da30ae..9bcb6cbd5aa9 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h | |||
@@ -113,7 +113,7 @@ struct acpiphp_slot { | |||
113 | 113 | ||
114 | u8 device; /* pci device# */ | 114 | u8 device; /* pci device# */ |
115 | 115 | ||
116 | u32 sun; /* ACPI _SUN (slot unique number) */ | 116 | unsigned long long sun; /* ACPI _SUN (slot unique number) */ |
117 | u32 flags; /* see below */ | 117 | u32 flags; /* see below */ |
118 | }; | 118 | }; |
119 | 119 | ||
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 95b536a23d25..43c10bd261b4 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
@@ -337,7 +337,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) | |||
337 | slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; | 337 | slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; |
338 | 338 | ||
339 | acpiphp_slot->slot = slot; | 339 | acpiphp_slot->slot = slot; |
340 | snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun); | 340 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); |
341 | 341 | ||
342 | retval = pci_hp_register(slot->hotplug_slot, | 342 | retval = pci_hp_register(slot->hotplug_slot, |
343 | acpiphp_slot->bridge->pci_bus, | 343 | acpiphp_slot->bridge->pci_bus, |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 955aae4071f7..3affc6472e65 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -255,13 +255,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
255 | 255 | ||
256 | bridge->nr_slots++; | 256 | bridge->nr_slots++; |
257 | 257 | ||
258 | dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n", | 258 | dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", |
259 | slot->sun, pci_domain_nr(bridge->pci_bus), | 259 | slot->sun, pci_domain_nr(bridge->pci_bus), |
260 | bridge->pci_bus->number, slot->device); | 260 | bridge->pci_bus->number, slot->device); |
261 | retval = acpiphp_register_hotplug_slot(slot); | 261 | retval = acpiphp_register_hotplug_slot(slot); |
262 | if (retval) { | 262 | if (retval) { |
263 | if (retval == -EBUSY) | 263 | if (retval == -EBUSY) |
264 | warn("Slot %d already registered by another " | 264 | warn("Slot %llu already registered by another " |
265 | "hotplug driver\n", slot->sun); | 265 | "hotplug driver\n", slot->sun); |
266 | else | 266 | else |
267 | warn("acpiphp_register_hotplug_slot failed " | 267 | warn("acpiphp_register_hotplug_slot failed " |
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index c892daae74d6..633e743442ac 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -1402,10 +1402,6 @@ static int __init ibmphp_init(void) | |||
1402 | goto error; | 1402 | goto error; |
1403 | } | 1403 | } |
1404 | 1404 | ||
1405 | /* lock ourselves into memory with a module | ||
1406 | * count of -1 so that no one can unload us. */ | ||
1407 | module_put(THIS_MODULE); | ||
1408 | |||
1409 | exit: | 1405 | exit: |
1410 | return rc; | 1406 | return rc; |
1411 | 1407 | ||
@@ -1423,4 +1419,3 @@ static void __exit ibmphp_exit(void) | |||
1423 | } | 1419 | } |
1424 | 1420 | ||
1425 | module_init(ibmphp_init); | 1421 | module_init(ibmphp_init); |
1426 | module_exit(ibmphp_exit); | ||
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 4b23bc39b11e..39cf248d24e3 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -432,18 +432,19 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ | |||
432 | goto err_out_release_ctlr; | 432 | goto err_out_release_ctlr; |
433 | } | 433 | } |
434 | 434 | ||
435 | /* Check if slot is occupied */ | ||
435 | t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); | 436 | t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); |
436 | 437 | t_slot->hpc_ops->get_adapter_status(t_slot, &value); | |
437 | t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */ | 438 | if (value) { |
438 | if (value && pciehp_force) { | 439 | if (pciehp_force) |
439 | rc = pciehp_enable_slot(t_slot); | 440 | pciehp_enable_slot(t_slot); |
440 | if (rc) /* -ENODEV: shouldn't happen, but deal with it */ | 441 | } else { |
441 | value = 0; | 442 | /* Power off slot if not occupied */ |
442 | } | 443 | if (POWER_CTRL(ctrl)) { |
443 | if ((POWER_CTRL(ctrl)) && !value) { | 444 | rc = t_slot->hpc_ops->power_off_slot(t_slot); |
444 | rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ | 445 | if (rc) |
445 | if (rc) | 446 | goto err_out_free_ctrl_slot; |
446 | goto err_out_free_ctrl_slot; | 447 | } |
447 | } | 448 | } |
448 | 449 | ||
449 | return 0; | 450 | return 0; |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index dfc63d01f20a..aac7006949f1 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -252,7 +252,7 @@ static void report_resume(struct pci_dev *dev, void *data) | |||
252 | 252 | ||
253 | if (!dev->driver || | 253 | if (!dev->driver || |
254 | !dev->driver->err_handler || | 254 | !dev->driver->err_handler || |
255 | !dev->driver->err_handler->slot_reset) | 255 | !dev->driver->err_handler->resume) |
256 | return; | 256 | return; |
257 | 257 | ||
258 | err_handler = dev->driver->err_handler; | 258 | err_handler = dev->driver->err_handler; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 5f4f85f56cb7..ce0985615133 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -606,27 +606,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev) | |||
606 | sis_apic_bug = 1; | 606 | sis_apic_bug = 1; |
607 | } | 607 | } |
608 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); | 608 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); |
609 | |||
610 | #define AMD8131_revA0 0x01 | ||
611 | #define AMD8131_revB0 0x11 | ||
612 | #define AMD8131_MISC 0x40 | ||
613 | #define AMD8131_NIOAMODE_BIT 0 | ||
614 | static void quirk_amd_8131_ioapic(struct pci_dev *dev) | ||
615 | { | ||
616 | unsigned char tmp; | ||
617 | |||
618 | if (nr_ioapics == 0) | ||
619 | return; | ||
620 | |||
621 | if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) { | ||
622 | dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n"); | ||
623 | pci_read_config_byte( dev, AMD8131_MISC, &tmp); | ||
624 | tmp &= ~(1 << AMD8131_NIOAMODE_BIT); | ||
625 | pci_write_config_byte( dev, AMD8131_MISC, tmp); | ||
626 | } | ||
627 | } | ||
628 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); | ||
629 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); | ||
630 | #endif /* CONFIG_X86_IO_APIC */ | 609 | #endif /* CONFIG_X86_IO_APIC */ |
631 | 610 | ||
632 | /* | 611 | /* |
@@ -1423,6 +1402,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); | |||
1423 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); | 1402 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); |
1424 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); | 1403 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); |
1425 | 1404 | ||
1405 | #ifdef CONFIG_X86_IO_APIC | ||
1406 | /* | ||
1407 | * Boot interrupts on some chipsets cannot be turned off. For these chipsets, | ||
1408 | * remap the original interrupt in the linux kernel to the boot interrupt, so | ||
1409 | * that a PCI device's interrupt handler is installed on the boot interrupt | ||
1410 | * line instead. | ||
1411 | */ | ||
1412 | static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) | ||
1413 | { | ||
1414 | if (noioapicquirk || noioapicreroute) | ||
1415 | return; | ||
1416 | |||
1417 | dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; | ||
1418 | |||
1419 | printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", | ||
1420 | dev->vendor, dev->device); | ||
1421 | return; | ||
1422 | } | ||
1423 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); | ||
1424 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); | ||
1425 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); | ||
1426 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); | ||
1427 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); | ||
1428 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); | ||
1429 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); | ||
1430 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); | ||
1431 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); | ||
1432 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); | ||
1433 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); | ||
1434 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); | ||
1435 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); | ||
1436 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); | ||
1437 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); | ||
1438 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); | ||
1439 | |||
1440 | /* | ||
1441 | * On some chipsets we can disable the generation of legacy INTx boot | ||
1442 | * interrupts. | ||
1443 | */ | ||
1444 | |||
1445 | /* | ||
1446 | * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no | ||
1447 | * 300641-004US, section 5.7.3. | ||
1448 | */ | ||
1449 | #define INTEL_6300_IOAPIC_ABAR 0x40 | ||
1450 | #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) | ||
1451 | |||
1452 | static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) | ||
1453 | { | ||
1454 | u16 pci_config_word; | ||
1455 | |||
1456 | if (noioapicquirk) | ||
1457 | return; | ||
1458 | |||
1459 | pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); | ||
1460 | pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; | ||
1461 | pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); | ||
1462 | |||
1463 | printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", | ||
1464 | dev->vendor, dev->device); | ||
1465 | } | ||
1466 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | ||
1467 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | ||
1468 | |||
1469 | /* | ||
1470 | * disable boot interrupts on HT-1000 | ||
1471 | */ | ||
1472 | #define BC_HT1000_FEATURE_REG 0x64 | ||
1473 | #define BC_HT1000_PIC_REGS_ENABLE (1<<0) | ||
1474 | #define BC_HT1000_MAP_IDX 0xC00 | ||
1475 | #define BC_HT1000_MAP_DATA 0xC01 | ||
1476 | |||
1477 | static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) | ||
1478 | { | ||
1479 | u32 pci_config_dword; | ||
1480 | u8 irq; | ||
1481 | |||
1482 | if (noioapicquirk) | ||
1483 | return; | ||
1484 | |||
1485 | pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); | ||
1486 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | | ||
1487 | BC_HT1000_PIC_REGS_ENABLE); | ||
1488 | |||
1489 | for (irq = 0x10; irq < 0x10 + 32; irq++) { | ||
1490 | outb(irq, BC_HT1000_MAP_IDX); | ||
1491 | outb(0x00, BC_HT1000_MAP_DATA); | ||
1492 | } | ||
1493 | |||
1494 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); | ||
1495 | |||
1496 | printk(KERN_INFO "disabled boot interrupts on PCI device" | ||
1497 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | ||
1498 | } | ||
1499 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | ||
1500 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | ||
1501 | |||
1502 | /* | ||
1503 | * disable boot interrupts on AMD and ATI chipsets | ||
1504 | */ | ||
1505 | /* | ||
1506 | * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 | ||
1507 | * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode | ||
1508 | * (due to an erratum). | ||
1509 | */ | ||
1510 | #define AMD_813X_MISC 0x40 | ||
1511 | #define AMD_813X_NOIOAMODE (1<<0) | ||
1512 | |||
1513 | static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | ||
1514 | { | ||
1515 | u32 pci_config_dword; | ||
1516 | |||
1517 | if (noioapicquirk) | ||
1518 | return; | ||
1519 | |||
1520 | pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); | ||
1521 | pci_config_dword &= ~AMD_813X_NOIOAMODE; | ||
1522 | pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); | ||
1523 | |||
1524 | printk(KERN_INFO "disabled boot interrupts on PCI device " | ||
1525 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | ||
1526 | } | ||
1527 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | ||
1528 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | ||
1529 | |||
1530 | #define AMD_8111_PCI_IRQ_ROUTING 0x56 | ||
1531 | |||
1532 | static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) | ||
1533 | { | ||
1534 | u16 pci_config_word; | ||
1535 | |||
1536 | if (noioapicquirk) | ||
1537 | return; | ||
1538 | |||
1539 | pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); | ||
1540 | if (!pci_config_word) { | ||
1541 | printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " | ||
1542 | "already disabled\n", | ||
1543 | dev->vendor, dev->device); | ||
1544 | return; | ||
1545 | } | ||
1546 | pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); | ||
1547 | printk(KERN_INFO "disabled boot interrupts on PCI device " | ||
1548 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | ||
1549 | } | ||
1550 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | ||
1551 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | ||
1552 | #endif /* CONFIG_X86_IO_APIC */ | ||
1553 | |||
1426 | /* | 1554 | /* |
1427 | * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size | 1555 | * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size |
1428 | * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. | 1556 | * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. |
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index 2cd77ab8fc66..054e05294af8 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c | |||
@@ -328,6 +328,13 @@ isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm) | |||
328 | int sr; | 328 | int sr; |
329 | u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; | 329 | u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; |
330 | 330 | ||
331 | /* The clock has an 8 bit wide bcd-coded register (they never learn) | ||
332 | * for the year. tm_year is an offset from 1900 and we are interested | ||
333 | * in the 2000-2099 range, so any value less than 100 is invalid. | ||
334 | */ | ||
335 | if (tm->tm_year < 100) | ||
336 | return -EINVAL; | ||
337 | |||
331 | regs[ISL1208_REG_SC] = bin2bcd(tm->tm_sec); | 338 | regs[ISL1208_REG_SC] = bin2bcd(tm->tm_sec); |
332 | regs[ISL1208_REG_MN] = bin2bcd(tm->tm_min); | 339 | regs[ISL1208_REG_MN] = bin2bcd(tm->tm_min); |
333 | regs[ISL1208_REG_HR] = bin2bcd(tm->tm_hour) | ISL1208_REG_HR_MIL; | 340 | regs[ISL1208_REG_HR] = bin2bcd(tm->tm_hour) | ISL1208_REG_HR_MIL; |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 363bd1303d21..570ae59c1d5e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1898,15 +1898,19 @@ restart_cb: | |||
1898 | wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); | 1898 | wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); |
1899 | /* Process finished ERP request. */ | 1899 | /* Process finished ERP request. */ |
1900 | if (cqr->refers) { | 1900 | if (cqr->refers) { |
1901 | spin_lock_bh(&block->queue_lock); | ||
1901 | __dasd_block_process_erp(block, cqr); | 1902 | __dasd_block_process_erp(block, cqr); |
1903 | spin_unlock_bh(&block->queue_lock); | ||
1902 | /* restart list_for_xx loop since dasd_process_erp | 1904 | /* restart list_for_xx loop since dasd_process_erp |
1903 | * might remove multiple elements */ | 1905 | * might remove multiple elements */ |
1904 | goto restart_cb; | 1906 | goto restart_cb; |
1905 | } | 1907 | } |
1906 | /* call the callback function */ | 1908 | /* call the callback function */ |
1909 | spin_lock_irq(&block->request_queue_lock); | ||
1907 | cqr->endclk = get_clock(); | 1910 | cqr->endclk = get_clock(); |
1908 | list_del_init(&cqr->blocklist); | 1911 | list_del_init(&cqr->blocklist); |
1909 | __dasd_cleanup_cqr(cqr); | 1912 | __dasd_cleanup_cqr(cqr); |
1913 | spin_unlock_irq(&block->request_queue_lock); | ||
1910 | } | 1914 | } |
1911 | return rc; | 1915 | return rc; |
1912 | } | 1916 | } |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 921443b01d16..2ef25731d197 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | /* This is ugly... */ | 24 | /* This is ugly... */ |
25 | #define PRINTK_HEADER "dasd_devmap:" | 25 | #define PRINTK_HEADER "dasd_devmap:" |
26 | #define DASD_BUS_ID_SIZE 20 | ||
26 | 27 | ||
27 | #include "dasd_int.h" | 28 | #include "dasd_int.h" |
28 | 29 | ||
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache); | |||
41 | */ | 42 | */ |
42 | struct dasd_devmap { | 43 | struct dasd_devmap { |
43 | struct list_head list; | 44 | struct list_head list; |
44 | char bus_id[BUS_ID_SIZE]; | 45 | char bus_id[DASD_BUS_ID_SIZE]; |
45 | unsigned int devindex; | 46 | unsigned int devindex; |
46 | unsigned short features; | 47 | unsigned short features; |
47 | struct dasd_device *device; | 48 | struct dasd_device *device; |
@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id) | |||
94 | int hash, i; | 95 | int hash, i; |
95 | 96 | ||
96 | hash = 0; | 97 | hash = 0; |
97 | for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++) | 98 | for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++) |
98 | hash += *bus_id; | 99 | hash += *bus_id; |
99 | return hash & 0xff; | 100 | return hash & 0xff; |
100 | } | 101 | } |
@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) { | |||
301 | int from, from_id0, from_id1; | 302 | int from, from_id0, from_id1; |
302 | int to, to_id0, to_id1; | 303 | int to, to_id0, to_id1; |
303 | int features, rc; | 304 | int features, rc; |
304 | char bus_id[BUS_ID_SIZE+1], *str; | 305 | char bus_id[DASD_BUS_ID_SIZE+1], *str; |
305 | 306 | ||
306 | str = parsestring; | 307 | str = parsestring; |
307 | rc = dasd_busid(&str, &from_id0, &from_id1, &from); | 308 | rc = dasd_busid(&str, &from_id0, &from_id1, &from); |
@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features) | |||
407 | devmap = NULL; | 408 | devmap = NULL; |
408 | hash = dasd_hash_busid(bus_id); | 409 | hash = dasd_hash_busid(bus_id); |
409 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) | 410 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) |
410 | if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { | 411 | if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) { |
411 | devmap = tmp; | 412 | devmap = tmp; |
412 | break; | 413 | break; |
413 | } | 414 | } |
414 | if (!devmap) { | 415 | if (!devmap) { |
415 | /* This bus_id is new. */ | 416 | /* This bus_id is new. */ |
416 | new->devindex = dasd_max_devindex++; | 417 | new->devindex = dasd_max_devindex++; |
417 | strncpy(new->bus_id, bus_id, BUS_ID_SIZE); | 418 | strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE); |
418 | new->features = features; | 419 | new->features = features; |
419 | new->device = NULL; | 420 | new->device = NULL; |
420 | list_add(&new->list, &dasd_hashlists[hash]); | 421 | list_add(&new->list, &dasd_hashlists[hash]); |
@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id) | |||
439 | devmap = ERR_PTR(-ENODEV); | 440 | devmap = ERR_PTR(-ENODEV); |
440 | hash = dasd_hash_busid(bus_id); | 441 | hash = dasd_hash_busid(bus_id); |
441 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) { | 442 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) { |
442 | if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { | 443 | if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) { |
443 | devmap = tmp; | 444 | devmap = tmp; |
444 | break; | 445 | break; |
445 | } | 446 | } |
@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev) | |||
561 | } | 562 | } |
562 | 563 | ||
563 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 564 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); |
564 | cdev->dev.driver_data = device; | 565 | dev_set_drvdata(&cdev->dev, device); |
565 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 566 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); |
566 | 567 | ||
567 | return device; | 568 | return device; |
@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device) | |||
597 | 598 | ||
598 | /* Disconnect dasd_device structure from ccw_device structure. */ | 599 | /* Disconnect dasd_device structure from ccw_device structure. */ |
599 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 600 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
600 | device->cdev->dev.driver_data = NULL; | 601 | dev_set_drvdata(&device->cdev->dev, NULL); |
601 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 602 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
602 | 603 | ||
603 | /* | 604 | /* |
@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device) | |||
638 | struct dasd_device * | 639 | struct dasd_device * |
639 | dasd_device_from_cdev_locked(struct ccw_device *cdev) | 640 | dasd_device_from_cdev_locked(struct ccw_device *cdev) |
640 | { | 641 | { |
641 | struct dasd_device *device = cdev->dev.driver_data; | 642 | struct dasd_device *device = dev_get_drvdata(&cdev->dev); |
642 | 643 | ||
643 | if (!device) | 644 | if (!device) |
644 | return ERR_PTR(-ENODEV); | 645 | return ERR_PTR(-ENODEV); |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 2e60d5f968c8..bd2c52e20762 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, | |||
1496 | 1496 | ||
1497 | 1497 | ||
1498 | /* service information message SIM */ | 1498 | /* service information message SIM */ |
1499 | if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) && | 1499 | if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) && |
1500 | ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { | 1500 | ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { |
1501 | dasd_3990_erp_handle_sim(device, irb->ecw); | 1501 | dasd_3990_erp_handle_sim(device, irb->ecw); |
1502 | dasd_schedule_device_bh(device); | 1502 | dasd_schedule_device_bh(device); |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 9088de84b45d..bf6fd348f20e 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off, | |||
180 | 180 | ||
181 | #ifdef CONFIG_DASD_PROFILE | 181 | #ifdef CONFIG_DASD_PROFILE |
182 | static char * | 182 | static char * |
183 | dasd_statistics_array(char *str, unsigned int *array, int shift) | 183 | dasd_statistics_array(char *str, unsigned int *array, int factor) |
184 | { | 184 | { |
185 | int i; | 185 | int i; |
186 | 186 | ||
187 | for (i = 0; i < 32; i++) { | 187 | for (i = 0; i < 32; i++) { |
188 | str += sprintf(str, "%7d ", array[i] >> shift); | 188 | str += sprintf(str, "%7d ", array[i] / factor); |
189 | if (i == 15) | 189 | if (i == 15) |
190 | str += sprintf(str, "\n"); | 190 | str += sprintf(str, "\n"); |
191 | } | 191 | } |
@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
202 | #ifdef CONFIG_DASD_PROFILE | 202 | #ifdef CONFIG_DASD_PROFILE |
203 | struct dasd_profile_info_t *prof; | 203 | struct dasd_profile_info_t *prof; |
204 | char *str; | 204 | char *str; |
205 | int shift; | 205 | int factor; |
206 | 206 | ||
207 | /* check for active profiling */ | 207 | /* check for active profiling */ |
208 | if (dasd_profile_level == DASD_PROFILE_OFF) { | 208 | if (dasd_profile_level == DASD_PROFILE_OFF) { |
@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
214 | 214 | ||
215 | prof = &dasd_global_profile; | 215 | prof = &dasd_global_profile; |
216 | /* prevent couter 'overflow' on output */ | 216 | /* prevent couter 'overflow' on output */ |
217 | for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++); | 217 | for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; |
218 | factor *= 10); | ||
218 | 219 | ||
219 | str = page; | 220 | str = page; |
220 | str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); | 221 | str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); |
221 | str += sprintf(str, "with %d sectors(512B each)\n", | 222 | str += sprintf(str, "with %u sectors(512B each)\n", |
222 | prof->dasd_io_sects); | 223 | prof->dasd_io_sects); |
224 | str += sprintf(str, "Scale Factor is %d\n", factor); | ||
223 | str += sprintf(str, | 225 | str += sprintf(str, |
224 | " __<4 ___8 __16 __32 __64 _128 " | 226 | " __<4 ___8 __16 __32 __64 _128 " |
225 | " _256 _512 __1k __2k __4k __8k " | 227 | " _256 _512 __1k __2k __4k __8k " |
@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
230 | " __1G __2G __4G " " _>4G\n"); | 232 | " __1G __2G __4G " " _>4G\n"); |
231 | 233 | ||
232 | str += sprintf(str, "Histogram of sizes (512B secs)\n"); | 234 | str += sprintf(str, "Histogram of sizes (512B secs)\n"); |
233 | str = dasd_statistics_array(str, prof->dasd_io_secs, shift); | 235 | str = dasd_statistics_array(str, prof->dasd_io_secs, factor); |
234 | str += sprintf(str, "Histogram of I/O times (microseconds)\n"); | 236 | str += sprintf(str, "Histogram of I/O times (microseconds)\n"); |
235 | str = dasd_statistics_array(str, prof->dasd_io_times, shift); | 237 | str = dasd_statistics_array(str, prof->dasd_io_times, factor); |
236 | str += sprintf(str, "Histogram of I/O times per sector\n"); | 238 | str += sprintf(str, "Histogram of I/O times per sector\n"); |
237 | str = dasd_statistics_array(str, prof->dasd_io_timps, shift); | 239 | str = dasd_statistics_array(str, prof->dasd_io_timps, factor); |
238 | str += sprintf(str, "Histogram of I/O time till ssch\n"); | 240 | str += sprintf(str, "Histogram of I/O time till ssch\n"); |
239 | str = dasd_statistics_array(str, prof->dasd_io_time1, shift); | 241 | str = dasd_statistics_array(str, prof->dasd_io_time1, factor); |
240 | str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); | 242 | str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); |
241 | str = dasd_statistics_array(str, prof->dasd_io_time2, shift); | 243 | str = dasd_statistics_array(str, prof->dasd_io_time2, factor); |
242 | str += sprintf(str, "Histogram of I/O time between ssch " | 244 | str += sprintf(str, "Histogram of I/O time between ssch " |
243 | "and irq per sector\n"); | 245 | "and irq per sector\n"); |
244 | str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift); | 246 | str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); |
245 | str += sprintf(str, "Histogram of I/O time between irq and end\n"); | 247 | str += sprintf(str, "Histogram of I/O time between irq and end\n"); |
246 | str = dasd_statistics_array(str, prof->dasd_io_time3, shift); | 248 | str = dasd_statistics_array(str, prof->dasd_io_time3, factor); |
247 | str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); | 249 | str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); |
248 | str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift); | 250 | str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); |
249 | len = str - page; | 251 | len = str - page; |
250 | #else | 252 | #else |
251 | len = sprintf(page, "Statistics are not activated in this kernel\n"); | 253 | len = sprintf(page, "Statistics are not activated in this kernel\n"); |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 63f26a135fe5..26ffc6ab441d 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -4,6 +4,9 @@ | |||
4 | * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer | 4 | * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #define KMSG_COMPONENT "dcssblk" | ||
8 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
9 | |||
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
8 | #include <linux/moduleparam.h> | 11 | #include <linux/moduleparam.h> |
9 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
@@ -17,19 +20,10 @@ | |||
17 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
18 | #include <asm/s390_rdev.h> | 21 | #include <asm/s390_rdev.h> |
19 | 22 | ||
20 | //#define DCSSBLK_DEBUG /* Debug messages on/off */ | ||
21 | #define DCSSBLK_NAME "dcssblk" | 23 | #define DCSSBLK_NAME "dcssblk" |
22 | #define DCSSBLK_MINORS_PER_DISK 1 | 24 | #define DCSSBLK_MINORS_PER_DISK 1 |
23 | #define DCSSBLK_PARM_LEN 400 | 25 | #define DCSSBLK_PARM_LEN 400 |
24 | 26 | #define DCSS_BUS_ID_SIZE 20 | |
25 | #ifdef DCSSBLK_DEBUG | ||
26 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x) | ||
27 | #else | ||
28 | #define PRINT_DEBUG(x...) do {} while (0) | ||
29 | #endif | ||
30 | #define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x) | ||
31 | #define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x) | ||
32 | #define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x) | ||
33 | 27 | ||
34 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); | 28 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); |
35 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); | 29 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); |
@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = { | |||
50 | struct dcssblk_dev_info { | 44 | struct dcssblk_dev_info { |
51 | struct list_head lh; | 45 | struct list_head lh; |
52 | struct device dev; | 46 | struct device dev; |
53 | char segment_name[BUS_ID_SIZE]; | 47 | char segment_name[DCSS_BUS_ID_SIZE]; |
54 | atomic_t use_count; | 48 | atomic_t use_count; |
55 | struct gendisk *gd; | 49 | struct gendisk *gd; |
56 | unsigned long start; | 50 | unsigned long start; |
@@ -65,7 +59,7 @@ struct dcssblk_dev_info { | |||
65 | 59 | ||
66 | struct segment_info { | 60 | struct segment_info { |
67 | struct list_head lh; | 61 | struct list_head lh; |
68 | char segment_name[BUS_ID_SIZE]; | 62 | char segment_name[DCSS_BUS_ID_SIZE]; |
69 | unsigned long start; | 63 | unsigned long start; |
70 | unsigned long end; | 64 | unsigned long end; |
71 | int segment_type; | 65 | int segment_type; |
@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) | |||
261 | /* check continuity */ | 255 | /* check continuity */ |
262 | for (i = 0; i < dev_info->num_of_segments - 1; i++) { | 256 | for (i = 0; i < dev_info->num_of_segments - 1; i++) { |
263 | if ((sort_list[i].end + 1) != sort_list[i+1].start) { | 257 | if ((sort_list[i].end + 1) != sort_list[i+1].start) { |
264 | PRINT_ERR("Segment %s is not contiguous with " | 258 | pr_err("Adjacent DCSSs %s and %s are not " |
265 | "segment %s\n", | 259 | "contiguous\n", sort_list[i].segment_name, |
266 | sort_list[i].segment_name, | 260 | sort_list[i+1].segment_name); |
267 | sort_list[i+1].segment_name); | ||
268 | rc = -EINVAL; | 261 | rc = -EINVAL; |
269 | goto out; | 262 | goto out; |
270 | } | 263 | } |
@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) | |||
275 | !(sort_list[i+1].segment_type & | 268 | !(sort_list[i+1].segment_type & |
276 | SEGMENT_EXCLUSIVE) || | 269 | SEGMENT_EXCLUSIVE) || |
277 | (sort_list[i+1].segment_type == SEG_TYPE_ER)) { | 270 | (sort_list[i+1].segment_type == SEG_TYPE_ER)) { |
278 | PRINT_ERR("Segment %s has different type from " | 271 | pr_err("DCSS %s and DCSS %s have " |
279 | "segment %s\n", | 272 | "incompatible types\n", |
280 | sort_list[i].segment_name, | 273 | sort_list[i].segment_name, |
281 | sort_list[i+1].segment_name); | 274 | sort_list[i+1].segment_name); |
282 | rc = -EINVAL; | 275 | rc = -EINVAL; |
283 | goto out; | 276 | goto out; |
284 | } | 277 | } |
@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
380 | } else if (inbuf[0] == '0') { | 373 | } else if (inbuf[0] == '0') { |
381 | /* reload segments in exclusive mode */ | 374 | /* reload segments in exclusive mode */ |
382 | if (dev_info->segment_type == SEG_TYPE_SC) { | 375 | if (dev_info->segment_type == SEG_TYPE_SC) { |
383 | PRINT_ERR("Segment type SC (%s) cannot be loaded in " | 376 | pr_err("DCSS %s is of type SC and cannot be " |
384 | "non-shared mode\n", dev_info->segment_name); | 377 | "loaded as exclusive-writable\n", |
378 | dev_info->segment_name); | ||
385 | rc = -EINVAL; | 379 | rc = -EINVAL; |
386 | goto out; | 380 | goto out; |
387 | } | 381 | } |
@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
404 | goto out; | 398 | goto out; |
405 | 399 | ||
406 | removeseg: | 400 | removeseg: |
407 | PRINT_ERR("Could not reload segment(s) of the device %s, removing " | 401 | pr_err("DCSS device %s is removed after a failed access mode " |
408 | "segment(s) now!\n", | 402 | "change\n", dev_info->segment_name); |
409 | dev_info->segment_name); | ||
410 | temp = entry; | 403 | temp = entry; |
411 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | 404 | list_for_each_entry(entry, &dev_info->seg_list, lh) { |
412 | if (entry != temp) | 405 | if (entry != temp) |
@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char | |||
454 | if (inbuf[0] == '1') { | 447 | if (inbuf[0] == '1') { |
455 | if (atomic_read(&dev_info->use_count) == 0) { | 448 | if (atomic_read(&dev_info->use_count) == 0) { |
456 | // device is idle => we save immediately | 449 | // device is idle => we save immediately |
457 | PRINT_INFO("Saving segment(s) of the device %s\n", | 450 | pr_info("All DCSSs that map to device %s are " |
458 | dev_info->segment_name); | 451 | "saved\n", dev_info->segment_name); |
459 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | 452 | list_for_each_entry(entry, &dev_info->seg_list, lh) { |
460 | segment_save(entry->segment_name); | 453 | segment_save(entry->segment_name); |
461 | } | 454 | } |
462 | } else { | 455 | } else { |
463 | // device is busy => we save it when it becomes | 456 | // device is busy => we save it when it becomes |
464 | // idle in dcssblk_release | 457 | // idle in dcssblk_release |
465 | PRINT_INFO("Device %s is currently busy, segment(s) " | 458 | pr_info("Device %s is in use, its DCSSs will be " |
466 | "will be saved when it becomes idle...\n", | 459 | "saved when it becomes idle\n", |
467 | dev_info->segment_name); | 460 | dev_info->segment_name); |
468 | dev_info->save_pending = 1; | 461 | dev_info->save_pending = 1; |
469 | } | 462 | } |
470 | } else if (inbuf[0] == '0') { | 463 | } else if (inbuf[0] == '0') { |
@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char | |||
472 | // device is busy & the user wants to undo his save | 465 | // device is busy & the user wants to undo his save |
473 | // request | 466 | // request |
474 | dev_info->save_pending = 0; | 467 | dev_info->save_pending = 0; |
475 | PRINT_INFO("Pending save for segment(s) of the device " | 468 | pr_info("A pending save request for device %s " |
476 | "%s deactivated\n", | 469 | "has been canceled\n", |
477 | dev_info->segment_name); | 470 | dev_info->segment_name); |
478 | } | 471 | } |
479 | } else { | 472 | } else { |
480 | up_write(&dcssblk_devices_sem); | 473 | up_write(&dcssblk_devices_sem); |
@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
614 | 607 | ||
615 | seg_byte_size = (dev_info->end - dev_info->start + 1); | 608 | seg_byte_size = (dev_info->end - dev_info->start + 1); |
616 | set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors | 609 | set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors |
617 | PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, " | 610 | pr_info("Loaded %s with total size %lu bytes and capacity %lu " |
618 | "capacity = %lu (512 Byte) sectors\n", local_buf, | 611 | "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); |
619 | seg_byte_size, seg_byte_size >> 9); | ||
620 | 612 | ||
621 | dev_info->save_pending = 0; | 613 | dev_info->save_pending = 0; |
622 | dev_info->is_shared = 1; | 614 | dev_info->is_shared = 1; |
@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch | |||
744 | dev_info = dcssblk_get_device_by_name(local_buf); | 736 | dev_info = dcssblk_get_device_by_name(local_buf); |
745 | if (dev_info == NULL) { | 737 | if (dev_info == NULL) { |
746 | up_write(&dcssblk_devices_sem); | 738 | up_write(&dcssblk_devices_sem); |
747 | PRINT_WARN("Device %s is not loaded!\n", local_buf); | 739 | pr_warning("Device %s cannot be removed because it is not a " |
740 | "known device\n", local_buf); | ||
748 | rc = -ENODEV; | 741 | rc = -ENODEV; |
749 | goto out_buf; | 742 | goto out_buf; |
750 | } | 743 | } |
751 | if (atomic_read(&dev_info->use_count) != 0) { | 744 | if (atomic_read(&dev_info->use_count) != 0) { |
752 | up_write(&dcssblk_devices_sem); | 745 | up_write(&dcssblk_devices_sem); |
753 | PRINT_WARN("Device %s is in use!\n", local_buf); | 746 | pr_warning("Device %s cannot be removed while it is in " |
747 | "use\n", local_buf); | ||
754 | rc = -EBUSY; | 748 | rc = -EBUSY; |
755 | goto out_buf; | 749 | goto out_buf; |
756 | } | 750 | } |
@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) | |||
807 | down_write(&dcssblk_devices_sem); | 801 | down_write(&dcssblk_devices_sem); |
808 | if (atomic_dec_and_test(&dev_info->use_count) | 802 | if (atomic_dec_and_test(&dev_info->use_count) |
809 | && (dev_info->save_pending)) { | 803 | && (dev_info->save_pending)) { |
810 | PRINT_INFO("Device %s became idle and is being saved now\n", | 804 | pr_info("Device %s has become idle and is being saved " |
811 | dev_info->segment_name); | 805 | "now\n", dev_info->segment_name); |
812 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | 806 | list_for_each_entry(entry, &dev_info->seg_list, lh) { |
813 | segment_save(entry->segment_name); | 807 | segment_save(entry->segment_name); |
814 | } | 808 | } |
@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) | |||
851 | case SEG_TYPE_SC: | 845 | case SEG_TYPE_SC: |
852 | /* cannot write to these segments */ | 846 | /* cannot write to these segments */ |
853 | if (bio_data_dir(bio) == WRITE) { | 847 | if (bio_data_dir(bio) == WRITE) { |
854 | PRINT_WARN("rejecting write to ro device %s\n", | 848 | pr_warning("Writing to %s failed because it " |
849 | "is a read-only device\n", | ||
855 | dev_name(&dev_info->dev)); | 850 | dev_name(&dev_info->dev)); |
856 | goto fail; | 851 | goto fail; |
857 | } | 852 | } |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 03916989ed2d..76814f3e898a 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -25,6 +25,9 @@ | |||
25 | * generic hard disk support to replace ad-hoc partitioning | 25 | * generic hard disk support to replace ad-hoc partitioning |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "xpram" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/module.h> | 31 | #include <linux/module.h> |
29 | #include <linux/moduleparam.h> | 32 | #include <linux/moduleparam.h> |
30 | #include <linux/ctype.h> /* isdigit, isxdigit */ | 33 | #include <linux/ctype.h> /* isdigit, isxdigit */ |
@@ -42,12 +45,6 @@ | |||
42 | #define XPRAM_DEVS 1 /* one partition */ | 45 | #define XPRAM_DEVS 1 /* one partition */ |
43 | #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ | 46 | #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ |
44 | 47 | ||
45 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x) | ||
46 | #define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x) | ||
47 | #define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x) | ||
48 | #define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) | ||
49 | |||
50 | |||
51 | typedef struct { | 48 | typedef struct { |
52 | unsigned int size; /* size of xpram segment in pages */ | 49 | unsigned int size; /* size of xpram segment in pages */ |
53 | unsigned int offset; /* start page of xpram segment */ | 50 | unsigned int offset; /* start page of xpram segment */ |
@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
264 | 261 | ||
265 | /* Check number of devices. */ | 262 | /* Check number of devices. */ |
266 | if (devs <= 0 || devs > XPRAM_MAX_DEVS) { | 263 | if (devs <= 0 || devs > XPRAM_MAX_DEVS) { |
267 | PRINT_ERR("invalid number %d of devices\n",devs); | 264 | pr_err("%d is not a valid number of XPRAM devices\n",devs); |
268 | return -EINVAL; | 265 | return -EINVAL; |
269 | } | 266 | } |
270 | xpram_devs = devs; | 267 | xpram_devs = devs; |
@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
295 | mem_auto_no++; | 292 | mem_auto_no++; |
296 | } | 293 | } |
297 | 294 | ||
298 | PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); | 295 | pr_info(" number of devices (partitions): %d \n", xpram_devs); |
299 | for (i = 0; i < xpram_devs; i++) { | 296 | for (i = 0; i < xpram_devs; i++) { |
300 | if (xpram_sizes[i]) | 297 | if (xpram_sizes[i]) |
301 | PRINT_INFO(" size of partition %d: %u kB\n", | 298 | pr_info(" size of partition %d: %u kB\n", |
302 | i, xpram_sizes[i]); | 299 | i, xpram_sizes[i]); |
303 | else | 300 | else |
304 | PRINT_INFO(" size of partition %d to be set " | 301 | pr_info(" size of partition %d to be set " |
305 | "automatically\n",i); | 302 | "automatically\n",i); |
306 | } | 303 | } |
307 | PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n", | 304 | pr_info(" memory needed (for sized partitions): %lu kB\n", |
308 | mem_needed); | 305 | mem_needed); |
309 | PRINT_DEBUG(" partitions to be sized automatically: %d\n", | 306 | pr_info(" partitions to be sized automatically: %d\n", |
310 | mem_auto_no); | 307 | mem_auto_no); |
311 | 308 | ||
312 | if (mem_needed > pages * 4) { | 309 | if (mem_needed > pages * 4) { |
313 | PRINT_ERR("Not enough expanded memory available\n"); | 310 | pr_err("Not enough expanded memory available\n"); |
314 | return -EINVAL; | 311 | return -EINVAL; |
315 | } | 312 | } |
316 | 313 | ||
@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
322 | */ | 319 | */ |
323 | if (mem_auto_no) { | 320 | if (mem_auto_no) { |
324 | mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; | 321 | mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; |
325 | PRINT_INFO(" automatically determined " | 322 | pr_info(" automatically determined " |
326 | "partition size: %lu kB\n", mem_auto); | 323 | "partition size: %lu kB\n", mem_auto); |
327 | for (i = 0; i < xpram_devs; i++) | 324 | for (i = 0; i < xpram_devs; i++) |
328 | if (xpram_sizes[i] == 0) | 325 | if (xpram_sizes[i] == 0) |
329 | xpram_sizes[i] = mem_auto; | 326 | xpram_sizes[i] = mem_auto; |
@@ -405,12 +402,12 @@ static int __init xpram_init(void) | |||
405 | 402 | ||
406 | /* Find out size of expanded memory. */ | 403 | /* Find out size of expanded memory. */ |
407 | if (xpram_present() != 0) { | 404 | if (xpram_present() != 0) { |
408 | PRINT_WARN("No expanded memory available\n"); | 405 | pr_err("No expanded memory available\n"); |
409 | return -ENODEV; | 406 | return -ENODEV; |
410 | } | 407 | } |
411 | xpram_pages = xpram_highest_page_index() + 1; | 408 | xpram_pages = xpram_highest_page_index() + 1; |
412 | PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", | 409 | pr_info(" %u pages expanded memory found (%lu KB).\n", |
413 | xpram_pages, (unsigned long) xpram_pages*4); | 410 | xpram_pages, (unsigned long) xpram_pages*4); |
414 | rc = xpram_setup_sizes(xpram_pages); | 411 | rc = xpram_setup_sizes(xpram_pages); |
415 | if (rc) | 412 | if (rc) |
416 | return rc; | 413 | return rc; |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 35fd8dfcaaa6..97e63cf46944 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "monreader" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/module.h> | 13 | #include <linux/module.h> |
11 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -24,19 +27,6 @@ | |||
24 | #include <asm/ebcdic.h> | 27 | #include <asm/ebcdic.h> |
25 | #include <asm/extmem.h> | 28 | #include <asm/extmem.h> |
26 | 29 | ||
27 | //#define MON_DEBUG /* Debug messages on/off */ | ||
28 | |||
29 | #define MON_NAME "monreader" | ||
30 | |||
31 | #define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x) | ||
32 | #define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x) | ||
33 | #define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x) | ||
34 | |||
35 | #ifdef MON_DEBUG | ||
36 | #define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x) | ||
37 | #else | ||
38 | #define P_DEBUG(x...) do {} while (0) | ||
39 | #endif | ||
40 | 30 | ||
41 | #define MON_COLLECT_SAMPLE 0x80 | 31 | #define MON_COLLECT_SAMPLE 0x80 |
42 | #define MON_COLLECT_EVENT 0x40 | 32 | #define MON_COLLECT_EVENT 0x40 |
@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg, | |||
172 | } else | 162 | } else |
173 | monmsg->replied_msglim = 1; | 163 | monmsg->replied_msglim = 1; |
174 | if (rc) { | 164 | if (rc) { |
175 | P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc); | 165 | pr_err("Reading monitor data failed with rc=%i\n", rc); |
176 | return -EIO; | 166 | return -EIO; |
177 | } | 167 | } |
178 | return 0; | 168 | return 0; |
@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
251 | { | 241 | { |
252 | struct mon_private *monpriv = path->private; | 242 | struct mon_private *monpriv = path->private; |
253 | 243 | ||
254 | P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); | 244 | pr_err("z/VM *MONITOR system service disconnected with rc=%i\n", |
245 | ipuser[0]); | ||
255 | iucv_path_sever(path, NULL); | 246 | iucv_path_sever(path, NULL); |
256 | atomic_set(&monpriv->iucv_severed, 1); | 247 | atomic_set(&monpriv->iucv_severed, 1); |
257 | wake_up(&mon_conn_wait_queue); | 248 | wake_up(&mon_conn_wait_queue); |
@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path, | |||
266 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, | 257 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, |
267 | msg, sizeof(*msg)); | 258 | msg, sizeof(*msg)); |
268 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { | 259 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { |
269 | P_WARNING("IUCV message pending, message limit (%i) reached\n", | 260 | pr_warning("The read queue for monitor data is full\n"); |
270 | MON_MSGLIM); | ||
271 | monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; | 261 | monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; |
272 | } | 262 | } |
273 | monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; | 263 | monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; |
@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp) | |||
311 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, | 301 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, |
312 | MON_SERVICE, NULL, user_data_connect, monpriv); | 302 | MON_SERVICE, NULL, user_data_connect, monpriv); |
313 | if (rc) { | 303 | if (rc) { |
314 | P_ERROR("iucv connection to *MONITOR failed with " | 304 | pr_err("Connecting to the z/VM *MONITOR system service " |
315 | "IPUSER SEVER code = %i\n", rc); | 305 | "failed with rc=%i\n", rc); |
316 | rc = -EIO; | 306 | rc = -EIO; |
317 | goto out_path; | 307 | goto out_path; |
318 | } | 308 | } |
@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp) | |||
353 | */ | 343 | */ |
354 | rc = iucv_path_sever(monpriv->path, user_data_sever); | 344 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
355 | if (rc) | 345 | if (rc) |
356 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); | 346 | pr_warning("Disconnecting the z/VM *MONITOR system service " |
347 | "failed with rc=%i\n", rc); | ||
357 | 348 | ||
358 | atomic_set(&monpriv->iucv_severed, 0); | 349 | atomic_set(&monpriv->iucv_severed, 0); |
359 | atomic_set(&monpriv->iucv_connected, 0); | 350 | atomic_set(&monpriv->iucv_connected, 0); |
@@ -469,7 +460,8 @@ static int __init mon_init(void) | |||
469 | int rc; | 460 | int rc; |
470 | 461 | ||
471 | if (!MACHINE_IS_VM) { | 462 | if (!MACHINE_IS_VM) { |
472 | P_ERROR("not running under z/VM, driver not loaded\n"); | 463 | pr_err("The z/VM *MONITOR record device driver cannot be " |
464 | "loaded without z/VM\n"); | ||
473 | return -ENODEV; | 465 | return -ENODEV; |
474 | } | 466 | } |
475 | 467 | ||
@@ -478,7 +470,8 @@ static int __init mon_init(void) | |||
478 | */ | 470 | */ |
479 | rc = iucv_register(&monreader_iucv_handler, 1); | 471 | rc = iucv_register(&monreader_iucv_handler, 1); |
480 | if (rc) { | 472 | if (rc) { |
481 | P_ERROR("failed to register with iucv driver\n"); | 473 | pr_err("The z/VM *MONITOR record device driver failed to " |
474 | "register with IUCV\n"); | ||
482 | return rc; | 475 | return rc; |
483 | } | 476 | } |
484 | 477 | ||
@@ -488,8 +481,8 @@ static int __init mon_init(void) | |||
488 | goto out_iucv; | 481 | goto out_iucv; |
489 | } | 482 | } |
490 | if (rc != SEG_TYPE_SC) { | 483 | if (rc != SEG_TYPE_SC) { |
491 | P_ERROR("segment %s has unsupported type, should be SC\n", | 484 | pr_err("The specified *MONITOR DCSS %s does not have the " |
492 | mon_dcss_name); | 485 | "required type SC\n", mon_dcss_name); |
493 | rc = -EINVAL; | 486 | rc = -EINVAL; |
494 | goto out_iucv; | 487 | goto out_iucv; |
495 | } | 488 | } |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 4d71aa8c1a79..c7d7483bab9a 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> | 8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "monwriter" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) | |||
64 | rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); | 67 | rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); |
65 | if (rc <= 0) | 68 | if (rc <= 0) |
66 | return rc; | 69 | return rc; |
70 | pr_err("Writing monitor data failed with rc=%i\n", rc); | ||
67 | if (rc == 5) | 71 | if (rc == 5) |
68 | return -EPERM; | 72 | return -EPERM; |
69 | printk("DIAG X'DC' error with return code: %i\n", rc); | ||
70 | return -EINVAL; | 73 | return -EINVAL; |
71 | } | 74 | } |
72 | 75 | ||
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index ec9c0bcf66ee..506390496416 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "sclp_cmd" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/completion.h> | 12 | #include <linux/completion.h> |
10 | #include <linux/init.h> | 13 | #include <linux/init.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
@@ -16,9 +19,8 @@ | |||
16 | #include <linux/memory.h> | 19 | #include <linux/memory.h> |
17 | #include <asm/chpid.h> | 20 | #include <asm/chpid.h> |
18 | #include <asm/sclp.h> | 21 | #include <asm/sclp.h> |
19 | #include "sclp.h" | ||
20 | 22 | ||
21 | #define TAG "sclp_cmd: " | 23 | #include "sclp.h" |
22 | 24 | ||
23 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | 25 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 |
24 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | 26 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 |
@@ -169,8 +171,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb) | |||
169 | 171 | ||
170 | /* Check response. */ | 172 | /* Check response. */ |
171 | if (request->status != SCLP_REQ_DONE) { | 173 | if (request->status != SCLP_REQ_DONE) { |
172 | printk(KERN_WARNING TAG "sync request failed " | 174 | pr_warning("sync request failed (cmd=0x%08x, " |
173 | "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status); | 175 | "status=0x%02x)\n", cmd, request->status); |
174 | rc = -EIO; | 176 | rc = -EIO; |
175 | } | 177 | } |
176 | out: | 178 | out: |
@@ -224,8 +226,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) | |||
224 | if (rc) | 226 | if (rc) |
225 | goto out; | 227 | goto out; |
226 | if (sccb->header.response_code != 0x0010) { | 228 | if (sccb->header.response_code != 0x0010) { |
227 | printk(KERN_WARNING TAG "readcpuinfo failed " | 229 | pr_warning("readcpuinfo failed (response=0x%04x)\n", |
228 | "(response=0x%04x)\n", sccb->header.response_code); | 230 | sccb->header.response_code); |
229 | rc = -EIO; | 231 | rc = -EIO; |
230 | goto out; | 232 | goto out; |
231 | } | 233 | } |
@@ -262,8 +264,9 @@ static int do_cpu_configure(sclp_cmdw_t cmd) | |||
262 | case 0x0120: | 264 | case 0x0120: |
263 | break; | 265 | break; |
264 | default: | 266 | default: |
265 | printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, " | 267 | pr_warning("configure cpu failed (cmd=0x%08x, " |
266 | "response=0x%04x)\n", cmd, sccb->header.response_code); | 268 | "response=0x%04x)\n", cmd, |
269 | sccb->header.response_code); | ||
267 | rc = -EIO; | 270 | rc = -EIO; |
268 | break; | 271 | break; |
269 | } | 272 | } |
@@ -626,9 +629,9 @@ static int do_chp_configure(sclp_cmdw_t cmd) | |||
626 | case 0x0450: | 629 | case 0x0450: |
627 | break; | 630 | break; |
628 | default: | 631 | default: |
629 | printk(KERN_WARNING TAG "configure channel-path failed " | 632 | pr_warning("configure channel-path failed " |
630 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | 633 | "(cmd=0x%08x, response=0x%04x)\n", cmd, |
631 | sccb->header.response_code); | 634 | sccb->header.response_code); |
632 | rc = -EIO; | 635 | rc = -EIO; |
633 | break; | 636 | break; |
634 | } | 637 | } |
@@ -695,8 +698,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info) | |||
695 | if (rc) | 698 | if (rc) |
696 | goto out; | 699 | goto out; |
697 | if (sccb->header.response_code != 0x0010) { | 700 | if (sccb->header.response_code != 0x0010) { |
698 | printk(KERN_WARNING TAG "read channel-path info failed " | 701 | pr_warning("read channel-path info failed " |
699 | "(response=0x%04x)\n", sccb->header.response_code); | 702 | "(response=0x%04x)\n", sccb->header.response_code); |
700 | rc = -EIO; | 703 | rc = -EIO; |
701 | goto out; | 704 | goto out; |
702 | } | 705 | } |
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 4cebd6ee6d27..b497afe061cc 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
@@ -5,15 +5,17 @@ | |||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "sclp_config" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
8 | #include <linux/init.h> | 11 | #include <linux/init.h> |
9 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
10 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
11 | #include <linux/sysdev.h> | 14 | #include <linux/sysdev.h> |
12 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
13 | #include <asm/smp.h> | 16 | #include <asm/smp.h> |
14 | #include "sclp.h" | ||
15 | 17 | ||
16 | #define TAG "sclp_config: " | 18 | #include "sclp.h" |
17 | 19 | ||
18 | struct conf_mgm_data { | 20 | struct conf_mgm_data { |
19 | u8 reserved; | 21 | u8 reserved; |
@@ -31,7 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
31 | int cpu; | 33 | int cpu; |
32 | struct sys_device *sysdev; | 34 | struct sys_device *sysdev; |
33 | 35 | ||
34 | printk(KERN_WARNING TAG "cpu capability changed.\n"); | 36 | pr_warning("cpu capability changed.\n"); |
35 | get_online_cpus(); | 37 | get_online_cpus(); |
36 | for_each_online_cpu(cpu) { | 38 | for_each_online_cpu(cpu) { |
37 | sysdev = get_cpu_sysdev(cpu); | 39 | sysdev = get_cpu_sysdev(cpu); |
@@ -78,7 +80,7 @@ static int __init sclp_conf_init(void) | |||
78 | return rc; | 80 | return rc; |
79 | 81 | ||
80 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { | 82 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { |
81 | printk(KERN_WARNING TAG "no configuration management.\n"); | 83 | pr_warning("no configuration management.\n"); |
82 | sclp_unregister(&sclp_conf_register); | 84 | sclp_unregister(&sclp_conf_register); |
83 | rc = -ENOSYS; | 85 | rc = -ENOSYS; |
84 | } | 86 | } |
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index d887bd261d28..62c2647f37f4 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Michael Ernst <mernst@de.ibm.com> | 7 | * Michael Ernst <mernst@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "sclp_cpi" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
12 | #include <linux/stat.h> | 15 | #include <linux/stat.h> |
@@ -20,6 +23,7 @@ | |||
20 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
21 | #include <asm/ebcdic.h> | 24 | #include <asm/ebcdic.h> |
22 | #include <asm/sclp.h> | 25 | #include <asm/sclp.h> |
26 | |||
23 | #include "sclp.h" | 27 | #include "sclp.h" |
24 | #include "sclp_rw.h" | 28 | #include "sclp_rw.h" |
25 | #include "sclp_cpi_sys.h" | 29 | #include "sclp_cpi_sys.h" |
@@ -150,16 +154,16 @@ static int cpi_req(void) | |||
150 | wait_for_completion(&completion); | 154 | wait_for_completion(&completion); |
151 | 155 | ||
152 | if (req->status != SCLP_REQ_DONE) { | 156 | if (req->status != SCLP_REQ_DONE) { |
153 | printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n", | 157 | pr_warning("request failed (status=0x%02x)\n", |
154 | req->status); | 158 | req->status); |
155 | rc = -EIO; | 159 | rc = -EIO; |
156 | goto out_free_req; | 160 | goto out_free_req; |
157 | } | 161 | } |
158 | 162 | ||
159 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; | 163 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; |
160 | if (response != 0x0020) { | 164 | if (response != 0x0020) { |
161 | printk(KERN_WARNING "cpi: failed with " | 165 | pr_warning("request failed with response code 0x%x\n", |
162 | "response code 0x%x\n", response); | 166 | response); |
163 | rc = -EIO; | 167 | rc = -EIO; |
164 | } | 168 | } |
165 | 169 | ||
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 8b854857ba07..6a1c58dc61a7 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c | |||
@@ -5,15 +5,18 @@ | |||
5 | * Author(s): Michael Holzheu | 5 | * Author(s): Michael Holzheu |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "sclp_sdias" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
8 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
9 | #include <asm/sclp.h> | 12 | #include <asm/sclp.h> |
10 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
11 | #include <asm/ipl.h> | 14 | #include <asm/ipl.h> |
15 | |||
12 | #include "sclp.h" | 16 | #include "sclp.h" |
13 | #include "sclp_rw.h" | 17 | #include "sclp_rw.h" |
14 | 18 | ||
15 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) | 19 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) |
16 | #define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) | ||
17 | 20 | ||
18 | #define SDIAS_RETRIES 300 | 21 | #define SDIAS_RETRIES 300 |
19 | #define SDIAS_SLEEP_TICKS 50 | 22 | #define SDIAS_SLEEP_TICKS 50 |
@@ -131,7 +134,7 @@ int sclp_sdias_blk_count(void) | |||
131 | 134 | ||
132 | rc = sdias_sclp_send(&request); | 135 | rc = sdias_sclp_send(&request); |
133 | if (rc) { | 136 | if (rc) { |
134 | ERROR_MSG("sclp_send failed for get_nr_blocks\n"); | 137 | pr_err("sclp_send failed for get_nr_blocks\n"); |
135 | goto out; | 138 | goto out; |
136 | } | 139 | } |
137 | if (sccb.hdr.response_code != 0x0020) { | 140 | if (sccb.hdr.response_code != 0x0020) { |
@@ -145,7 +148,8 @@ int sclp_sdias_blk_count(void) | |||
145 | rc = sccb.evbuf.blk_cnt; | 148 | rc = sccb.evbuf.blk_cnt; |
146 | break; | 149 | break; |
147 | default: | 150 | default: |
148 | ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); | 151 | pr_err("SCLP error: %x\n", |
152 | sccb.evbuf.event_status); | ||
149 | rc = -EIO; | 153 | rc = -EIO; |
150 | goto out; | 154 | goto out; |
151 | } | 155 | } |
@@ -201,7 +205,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | |||
201 | 205 | ||
202 | rc = sdias_sclp_send(&request); | 206 | rc = sdias_sclp_send(&request); |
203 | if (rc) { | 207 | if (rc) { |
204 | ERROR_MSG("sclp_send failed: %x\n", rc); | 208 | pr_err("sclp_send failed: %x\n", rc); |
205 | goto out; | 209 | goto out; |
206 | } | 210 | } |
207 | if (sccb.hdr.response_code != 0x0020) { | 211 | if (sccb.hdr.response_code != 0x0020) { |
@@ -219,9 +223,9 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | |||
219 | case EVSTATE_NO_DATA: | 223 | case EVSTATE_NO_DATA: |
220 | TRACE("no data\n"); | 224 | TRACE("no data\n"); |
221 | default: | 225 | default: |
222 | ERROR_MSG("Error from SCLP while copying hsa. " | 226 | pr_err("Error from SCLP while copying hsa. " |
223 | "Event status = %x\n", | 227 | "Event status = %x\n", |
224 | sccb.evbuf.event_status); | 228 | sccb.evbuf.event_status); |
225 | rc = -EIO; | 229 | rc = -EIO; |
226 | } | 230 | } |
227 | out: | 231 | out: |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 9854f19f5e62..a839aa531d7c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -583,23 +583,6 @@ sclp_vt220_chars_in_buffer(struct tty_struct *tty) | |||
583 | return count; | 583 | return count; |
584 | } | 584 | } |
585 | 585 | ||
586 | static void | ||
587 | __sclp_vt220_flush_buffer(void) | ||
588 | { | ||
589 | unsigned long flags; | ||
590 | |||
591 | sclp_vt220_emit_current(); | ||
592 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
593 | if (timer_pending(&sclp_vt220_timer)) | ||
594 | del_timer(&sclp_vt220_timer); | ||
595 | while (sclp_vt220_outqueue_count > 0) { | ||
596 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
597 | sclp_sync_wait(); | ||
598 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
599 | } | ||
600 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
601 | } | ||
602 | |||
603 | /* | 586 | /* |
604 | * Pass on all buffers to the hardware. Return only when there are no more | 587 | * Pass on all buffers to the hardware. Return only when there are no more |
605 | * buffers pending. | 588 | * buffers pending. |
@@ -745,6 +728,22 @@ sclp_vt220_con_device(struct console *c, int *index) | |||
745 | return sclp_vt220_driver; | 728 | return sclp_vt220_driver; |
746 | } | 729 | } |
747 | 730 | ||
731 | static void __sclp_vt220_flush_buffer(void) | ||
732 | { | ||
733 | unsigned long flags; | ||
734 | |||
735 | sclp_vt220_emit_current(); | ||
736 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
737 | if (timer_pending(&sclp_vt220_timer)) | ||
738 | del_timer(&sclp_vt220_timer); | ||
739 | while (sclp_vt220_outqueue_count > 0) { | ||
740 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
741 | sclp_sync_wait(); | ||
742 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
743 | } | ||
744 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
745 | } | ||
746 | |||
748 | static int | 747 | static int |
749 | sclp_vt220_notify(struct notifier_block *self, | 748 | sclp_vt220_notify(struct notifier_block *self, |
750 | unsigned long event, void *data) | 749 | unsigned long event, void *data) |
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 09e7d9bf438b..a6087cec55b4 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
@@ -11,12 +11,14 @@ | |||
11 | * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS | 11 | * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define KMSG_COMPONENT "vmcp" | ||
15 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
16 | |||
14 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
15 | #include <linux/init.h> | 18 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
17 | #include <linux/miscdevice.h> | 20 | #include <linux/miscdevice.h> |
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <asm/cpcmd.h> | 22 | #include <asm/cpcmd.h> |
21 | #include <asm/debug.h> | 23 | #include <asm/debug.h> |
22 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
@@ -26,8 +28,6 @@ MODULE_LICENSE("GPL"); | |||
26 | MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); | 28 | MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); |
27 | MODULE_DESCRIPTION("z/VM CP interface"); | 29 | MODULE_DESCRIPTION("z/VM CP interface"); |
28 | 30 | ||
29 | #define PRINTK_HEADER "vmcp: " | ||
30 | |||
31 | static debug_info_t *vmcp_debug; | 31 | static debug_info_t *vmcp_debug; |
32 | 32 | ||
33 | static int vmcp_open(struct inode *inode, struct file *file) | 33 | static int vmcp_open(struct inode *inode, struct file *file) |
@@ -41,13 +41,11 @@ static int vmcp_open(struct inode *inode, struct file *file) | |||
41 | if (!session) | 41 | if (!session) |
42 | return -ENOMEM; | 42 | return -ENOMEM; |
43 | 43 | ||
44 | lock_kernel(); | ||
45 | session->bufsize = PAGE_SIZE; | 44 | session->bufsize = PAGE_SIZE; |
46 | session->response = NULL; | 45 | session->response = NULL; |
47 | session->resp_size = 0; | 46 | session->resp_size = 0; |
48 | mutex_init(&session->mutex); | 47 | mutex_init(&session->mutex); |
49 | file->private_data = session; | 48 | file->private_data = session; |
50 | unlock_kernel(); | ||
51 | return nonseekable_open(inode, file); | 49 | return nonseekable_open(inode, file); |
52 | } | 50 | } |
53 | 51 | ||
@@ -193,7 +191,8 @@ static int __init vmcp_init(void) | |||
193 | int ret; | 191 | int ret; |
194 | 192 | ||
195 | if (!MACHINE_IS_VM) { | 193 | if (!MACHINE_IS_VM) { |
196 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); | 194 | pr_warning("The z/VM CP interface device driver cannot be " |
195 | "loaded without z/VM\n"); | ||
197 | return -ENODEV; | 196 | return -ENODEV; |
198 | } | 197 | } |
199 | 198 | ||
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 24762727bc27..aabbeb909cc6 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -10,6 +10,10 @@ | |||
10 | * Stefan Weinhuber <wein@de.ibm.com> | 10 | * Stefan Weinhuber <wein@de.ibm.com> |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | |||
14 | #define KMSG_COMPONENT "vmlogrdr" | ||
15 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
16 | |||
13 | #include <linux/module.h> | 17 | #include <linux/module.h> |
14 | #include <linux/init.h> | 18 | #include <linux/init.h> |
15 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
@@ -28,8 +32,6 @@ | |||
28 | #include <linux/smp_lock.h> | 32 | #include <linux/smp_lock.h> |
29 | #include <linux/string.h> | 33 | #include <linux/string.h> |
30 | 34 | ||
31 | |||
32 | |||
33 | MODULE_AUTHOR | 35 | MODULE_AUTHOR |
34 | ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" | 36 | ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" |
35 | " Stefan Weinhuber (wein@de.ibm.com)"); | 37 | " Stefan Weinhuber (wein@de.ibm.com)"); |
@@ -174,8 +176,7 @@ static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
174 | struct vmlogrdr_priv_t * logptr = path->private; | 176 | struct vmlogrdr_priv_t * logptr = path->private; |
175 | u8 reason = (u8) ipuser[8]; | 177 | u8 reason = (u8) ipuser[8]; |
176 | 178 | ||
177 | printk (KERN_ERR "vmlogrdr: connection severed with" | 179 | pr_err("vmlogrdr: connection severed with reason %i\n", reason); |
178 | " reason %i\n", reason); | ||
179 | 180 | ||
180 | iucv_path_sever(path, NULL); | 181 | iucv_path_sever(path, NULL); |
181 | kfree(path); | 182 | kfree(path); |
@@ -333,8 +334,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
333 | if (logptr->autorecording) { | 334 | if (logptr->autorecording) { |
334 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); | 335 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); |
335 | if (ret) | 336 | if (ret) |
336 | printk (KERN_WARNING "vmlogrdr: failed to start " | 337 | pr_warning("vmlogrdr: failed to start " |
337 | "recording automatically\n"); | 338 | "recording automatically\n"); |
338 | } | 339 | } |
339 | 340 | ||
340 | /* create connection to the system service */ | 341 | /* create connection to the system service */ |
@@ -345,9 +346,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
345 | logptr->system_service, NULL, NULL, | 346 | logptr->system_service, NULL, NULL, |
346 | logptr); | 347 | logptr); |
347 | if (connect_rc) { | 348 | if (connect_rc) { |
348 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " | 349 | pr_err("vmlogrdr: iucv connection to %s " |
349 | "failed with rc %i \n", logptr->system_service, | 350 | "failed with rc %i \n", |
350 | connect_rc); | 351 | logptr->system_service, connect_rc); |
351 | goto out_path; | 352 | goto out_path; |
352 | } | 353 | } |
353 | 354 | ||
@@ -388,8 +389,8 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) | |||
388 | if (logptr->autorecording) { | 389 | if (logptr->autorecording) { |
389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 390 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
390 | if (ret) | 391 | if (ret) |
391 | printk (KERN_WARNING "vmlogrdr: failed to stop " | 392 | pr_warning("vmlogrdr: failed to stop " |
392 | "recording automatically\n"); | 393 | "recording automatically\n"); |
393 | } | 394 | } |
394 | logptr->dev_in_use = 0; | 395 | logptr->dev_in_use = 0; |
395 | 396 | ||
@@ -823,8 +824,7 @@ static int __init vmlogrdr_init(void) | |||
823 | dev_t dev; | 824 | dev_t dev; |
824 | 825 | ||
825 | if (! MACHINE_IS_VM) { | 826 | if (! MACHINE_IS_VM) { |
826 | printk (KERN_ERR "vmlogrdr: not running under VM, " | 827 | pr_err("not running under VM, driver not loaded.\n"); |
827 | "driver not loaded.\n"); | ||
828 | return -ENODEV; | 828 | return -ENODEV; |
829 | } | 829 | } |
830 | 830 | ||
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 9020eba620ee..5dcef81fc9d9 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Munzert <munzert@de.ibm.com> | 8 | * Frank Munzert <munzert@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "vmur" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/cdev.h> | 14 | #include <linux/cdev.h> |
12 | #include <linux/smp_lock.h> | 15 | #include <linux/smp_lock.h> |
13 | 16 | ||
@@ -40,8 +43,6 @@ MODULE_AUTHOR("IBM Corporation"); | |||
40 | MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); | 43 | MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); |
41 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
42 | 45 | ||
43 | #define PRINTK_HEADER "vmur: " | ||
44 | |||
45 | static dev_t ur_first_dev_maj_min; | 46 | static dev_t ur_first_dev_maj_min; |
46 | static struct class *vmur_class; | 47 | static struct class *vmur_class; |
47 | static struct debug_info *vmur_dbf; | 48 | static struct debug_info *vmur_dbf; |
@@ -987,7 +988,8 @@ static int __init ur_init(void) | |||
987 | dev_t dev; | 988 | dev_t dev; |
988 | 989 | ||
989 | if (!MACHINE_IS_VM) { | 990 | if (!MACHINE_IS_VM) { |
990 | PRINT_ERR("%s is only available under z/VM.\n", ur_banner); | 991 | pr_err("The %s cannot be loaded without z/VM\n", |
992 | ur_banner); | ||
991 | return -ENODEV; | 993 | return -ENODEV; |
992 | } | 994 | } |
993 | 995 | ||
@@ -1006,7 +1008,8 @@ static int __init ur_init(void) | |||
1006 | 1008 | ||
1007 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); | 1009 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); |
1008 | if (rc) { | 1010 | if (rc) { |
1009 | PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc); | 1011 | pr_err("Kernel function alloc_chrdev_region failed with " |
1012 | "error code %d\n", rc); | ||
1010 | goto fail_unregister_driver; | 1013 | goto fail_unregister_driver; |
1011 | } | 1014 | } |
1012 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); | 1015 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); |
@@ -1016,7 +1019,7 @@ static int __init ur_init(void) | |||
1016 | rc = PTR_ERR(vmur_class); | 1019 | rc = PTR_ERR(vmur_class); |
1017 | goto fail_unregister_region; | 1020 | goto fail_unregister_region; |
1018 | } | 1021 | } |
1019 | PRINT_INFO("%s loaded.\n", ur_banner); | 1022 | pr_info("%s loaded.\n", ur_banner); |
1020 | return 0; | 1023 | return 0; |
1021 | 1024 | ||
1022 | fail_unregister_region: | 1025 | fail_unregister_region: |
@@ -1034,7 +1037,7 @@ static void __exit ur_exit(void) | |||
1034 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); | 1037 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); |
1035 | ccw_driver_unregister(&ur_driver); | 1038 | ccw_driver_unregister(&ur_driver); |
1036 | debug_unregister(vmur_dbf); | 1039 | debug_unregister(vmur_dbf); |
1037 | PRINT_INFO("%s unloaded.\n", ur_banner); | 1040 | pr_info("%s unloaded.\n", ur_banner); |
1038 | } | 1041 | } |
1039 | 1042 | ||
1040 | module_init(ur_init); | 1043 | module_init(ur_init); |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 7fd84be11931..eefc6611412e 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Author(s): Michael Holzheu | 9 | * Author(s): Michael Holzheu |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "zdump" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
13 | #include <linux/miscdevice.h> | 16 | #include <linux/miscdevice.h> |
14 | #include <linux/utsname.h> | 17 | #include <linux/utsname.h> |
@@ -24,8 +27,6 @@ | |||
24 | #include "sclp.h" | 27 | #include "sclp.h" |
25 | 28 | ||
26 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) | 29 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) |
27 | #define MSG(x...) printk( KERN_ALERT x ) | ||
28 | #define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) | ||
29 | 30 | ||
30 | #define TO_USER 0 | 31 | #define TO_USER 0 |
31 | #define TO_KERNEL 1 | 32 | #define TO_KERNEL 1 |
@@ -563,19 +564,19 @@ static int __init sys_info_init(enum arch_id arch) | |||
563 | 564 | ||
564 | switch (arch) { | 565 | switch (arch) { |
565 | case ARCH_S390X: | 566 | case ARCH_S390X: |
566 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | 567 | pr_alert("DETECTED 'S390X (64 bit) OS'\n"); |
567 | sys_info.sa_base = SAVE_AREA_BASE_S390X; | 568 | sys_info.sa_base = SAVE_AREA_BASE_S390X; |
568 | sys_info.sa_size = sizeof(struct save_area_s390x); | 569 | sys_info.sa_size = sizeof(struct save_area_s390x); |
569 | set_s390x_lc_mask(&sys_info.lc_mask); | 570 | set_s390x_lc_mask(&sys_info.lc_mask); |
570 | break; | 571 | break; |
571 | case ARCH_S390: | 572 | case ARCH_S390: |
572 | MSG("DETECTED 'S390 (32 bit) OS'\n"); | 573 | pr_alert("DETECTED 'S390 (32 bit) OS'\n"); |
573 | sys_info.sa_base = SAVE_AREA_BASE_S390; | 574 | sys_info.sa_base = SAVE_AREA_BASE_S390; |
574 | sys_info.sa_size = sizeof(struct save_area_s390); | 575 | sys_info.sa_size = sizeof(struct save_area_s390); |
575 | set_s390_lc_mask(&sys_info.lc_mask); | 576 | set_s390_lc_mask(&sys_info.lc_mask); |
576 | break; | 577 | break; |
577 | default: | 578 | default: |
578 | ERROR_MSG("unknown architecture 0x%x.\n",arch); | 579 | pr_alert("0x%x is an unknown architecture.\n",arch); |
579 | return -EINVAL; | 580 | return -EINVAL; |
580 | } | 581 | } |
581 | sys_info.arch = arch; | 582 | sys_info.arch = arch; |
@@ -674,7 +675,8 @@ static int __init zcore_init(void) | |||
674 | 675 | ||
675 | #ifndef __s390x__ | 676 | #ifndef __s390x__ |
676 | if (arch == ARCH_S390X) { | 677 | if (arch == ARCH_S390X) { |
677 | ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); | 678 | pr_alert("The 32-bit dump tool cannot be used for a " |
679 | "64-bit system\n"); | ||
678 | rc = -EINVAL; | 680 | rc = -EINVAL; |
679 | goto fail; | 681 | goto fail; |
680 | } | 682 | } |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2f547b840ef0..fe00be3675cd 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Arnd Bergmann (arndb@de.ibm.com) | 9 | * Arnd Bergmann (arndb@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "cio" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
13 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid, | |||
50 | { | 53 | { |
51 | if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { | 54 | if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { |
52 | if (msgtrigger) | 55 | if (msgtrigger) |
53 | printk(KERN_WARNING "cio: Invalid cio_ignore range " | 56 | pr_warning("0.%x.%04x to 0.%x.%04x is not a valid " |
54 | "0.%x.%04x-0.%x.%04x\n", from_ssid, from, | 57 | "range for cio_ignore\n", from_ssid, from, |
55 | to_ssid, to); | 58 | to_ssid, to); |
59 | |||
56 | return 1; | 60 | return 1; |
57 | } | 61 | } |
58 | 62 | ||
@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, | |||
140 | rc = 0; | 144 | rc = 0; |
141 | out: | 145 | out: |
142 | if (rc && msgtrigger) | 146 | if (rc && msgtrigger) |
143 | printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n", | 147 | pr_warning("%s is not a valid device for the cio_ignore " |
144 | str); | 148 | "kernel parameter\n", str); |
145 | 149 | ||
146 | return rc; | 150 | return rc; |
147 | } | 151 | } |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 3ac2c2019f5e..918e6fce2573 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <asm/ccwdev.h> | 19 | #include <asm/ccwdev.h> |
20 | #include <asm/ccwgroup.h> | 20 | #include <asm/ccwgroup.h> |
21 | 21 | ||
22 | #define CCW_BUS_ID_SIZE 20 | ||
23 | |||
22 | /* In Linux 2.4, we had a channel device layer called "chandev" | 24 | /* In Linux 2.4, we had a channel device layer called "chandev" |
23 | * that did all sorts of obscure stuff for networking devices. | 25 | * that did all sorts of obscure stuff for networking devices. |
24 | * This is another driver that serves as a replacement for just | 26 | * This is another driver that serves as a replacement for just |
@@ -89,15 +91,23 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const | |||
89 | 91 | ||
90 | gdev = to_ccwgroupdev(dev); | 92 | gdev = to_ccwgroupdev(dev); |
91 | 93 | ||
92 | if (gdev->state != CCWGROUP_OFFLINE) | 94 | /* Prevent concurrent online/offline processing and ungrouping. */ |
93 | return -EINVAL; | 95 | if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) |
94 | 96 | return -EAGAIN; | |
97 | if (gdev->state != CCWGROUP_OFFLINE) { | ||
98 | rc = -EINVAL; | ||
99 | goto out; | ||
100 | } | ||
95 | /* Note that we cannot unregister the device from one of its | 101 | /* Note that we cannot unregister the device from one of its |
96 | * attribute methods, so we have to use this roundabout approach. | 102 | * attribute methods, so we have to use this roundabout approach. |
97 | */ | 103 | */ |
98 | rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); | 104 | rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); |
99 | if (rc) | 105 | out: |
100 | count = rc; | 106 | if (rc) { |
107 | /* Release onoff "lock" when ungrouping failed. */ | ||
108 | atomic_set(&gdev->onoff, 0); | ||
109 | return rc; | ||
110 | } | ||
101 | return count; | 111 | return count; |
102 | } | 112 | } |
103 | 113 | ||
@@ -172,7 +182,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) | |||
172 | len = end - start + 1; | 182 | len = end - start + 1; |
173 | end++; | 183 | end++; |
174 | } | 184 | } |
175 | if (len < BUS_ID_SIZE) { | 185 | if (len < CCW_BUS_ID_SIZE) { |
176 | strlcpy(bus_id, start, len); | 186 | strlcpy(bus_id, start, len); |
177 | rc = 0; | 187 | rc = 0; |
178 | } else | 188 | } else |
@@ -181,7 +191,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) | |||
181 | return rc; | 191 | return rc; |
182 | } | 192 | } |
183 | 193 | ||
184 | static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE]) | 194 | static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE]) |
185 | { | 195 | { |
186 | int cssid, ssid, devno; | 196 | int cssid, ssid, devno; |
187 | 197 | ||
@@ -213,7 +223,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, | |||
213 | { | 223 | { |
214 | struct ccwgroup_device *gdev; | 224 | struct ccwgroup_device *gdev; |
215 | int rc, i; | 225 | int rc, i; |
216 | char tmp_bus_id[BUS_ID_SIZE]; | 226 | char tmp_bus_id[CCW_BUS_ID_SIZE]; |
217 | const char *curr_buf; | 227 | const char *curr_buf; |
218 | 228 | ||
219 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), | 229 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 29826fdd47b8..ebab6ea4659b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "cio" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -333,6 +336,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | |||
333 | struct chp_config_data *data; | 336 | struct chp_config_data *data; |
334 | struct chp_id chpid; | 337 | struct chp_id chpid; |
335 | int num; | 338 | int num; |
339 | char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; | ||
336 | 340 | ||
337 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | 341 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); |
338 | if (sei_area->rs != 0) | 342 | if (sei_area->rs != 0) |
@@ -343,8 +347,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | |||
343 | if (!chp_test_bit(data->map, num)) | 347 | if (!chp_test_bit(data->map, num)) |
344 | continue; | 348 | continue; |
345 | chpid.id = num; | 349 | chpid.id = num; |
346 | printk(KERN_WARNING "cio: processing configure event %d for " | 350 | pr_notice("Processing %s for channel path %x.%02x\n", |
347 | "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); | 351 | events[data->op], chpid.cssid, chpid.id); |
348 | switch (data->op) { | 352 | switch (data->op) { |
349 | case 0: | 353 | case 0: |
350 | chp_cfg_schedule(chpid, 1); | 354 | chp_cfg_schedule(chpid, 1); |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index f49f0e502b8d..0a2f2edafc03 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -61,7 +61,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) | |||
61 | } | 61 | } |
62 | private->request = NULL; | 62 | private->request = NULL; |
63 | memcpy(&request->irb, irb, sizeof(*irb)); | 63 | memcpy(&request->irb, irb, sizeof(*irb)); |
64 | stsch(sch->schid, &sch->schib); | 64 | cio_update_schib(sch); |
65 | complete(&request->completion); | 65 | complete(&request->completion); |
66 | put_device(&sch->dev); | 66 | put_device(&sch->dev); |
67 | } | 67 | } |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3db2c386546f..8a8df7552969 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "cio" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -104,44 +107,6 @@ cio_get_options (struct subchannel *sch) | |||
104 | return flags; | 107 | return flags; |
105 | } | 108 | } |
106 | 109 | ||
107 | /* | ||
108 | * Use tpi to get a pending interrupt, call the interrupt handler and | ||
109 | * return a pointer to the subchannel structure. | ||
110 | */ | ||
111 | static int | ||
112 | cio_tpi(void) | ||
113 | { | ||
114 | struct tpi_info *tpi_info; | ||
115 | struct subchannel *sch; | ||
116 | struct irb *irb; | ||
117 | int irq_context; | ||
118 | |||
119 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | ||
120 | if (tpi (NULL) != 1) | ||
121 | return 0; | ||
122 | irb = (struct irb *) __LC_IRB; | ||
123 | /* Store interrupt response block to lowcore. */ | ||
124 | if (tsch (tpi_info->schid, irb) != 0) | ||
125 | /* Not status pending or not operational. */ | ||
126 | return 1; | ||
127 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | ||
128 | if (!sch) | ||
129 | return 1; | ||
130 | irq_context = in_interrupt(); | ||
131 | if (!irq_context) | ||
132 | local_bh_disable(); | ||
133 | irq_enter (); | ||
134 | spin_lock(sch->lock); | ||
135 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | ||
136 | if (sch->driver && sch->driver->irq) | ||
137 | sch->driver->irq(sch); | ||
138 | spin_unlock(sch->lock); | ||
139 | irq_exit (); | ||
140 | if (!irq_context) | ||
141 | _local_bh_enable(); | ||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | static int | 110 | static int |
146 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | 111 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) |
147 | { | 112 | { |
@@ -152,11 +117,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | |||
152 | else | 117 | else |
153 | sch->lpm = 0; | 118 | sch->lpm = 0; |
154 | 119 | ||
155 | stsch (sch->schid, &sch->schib); | ||
156 | |||
157 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " | 120 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " |
158 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, | 121 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, |
159 | sch->schid.sch_no); | 122 | sch->schid.sch_no); |
123 | |||
124 | if (cio_update_schib(sch)) | ||
125 | return -ENODEV; | ||
126 | |||
160 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); | 127 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); |
161 | CIO_TRACE_EVENT(0, dbf_text); | 128 | CIO_TRACE_EVENT(0, dbf_text); |
162 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); | 129 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); |
@@ -354,7 +321,8 @@ cio_cancel (struct subchannel *sch) | |||
354 | switch (ccode) { | 321 | switch (ccode) { |
355 | case 0: /* success */ | 322 | case 0: /* success */ |
356 | /* Update information in scsw. */ | 323 | /* Update information in scsw. */ |
357 | stsch (sch->schid, &sch->schib); | 324 | if (cio_update_schib(sch)) |
325 | return -ENODEV; | ||
358 | return 0; | 326 | return 0; |
359 | case 1: /* status pending */ | 327 | case 1: /* status pending */ |
360 | return -EBUSY; | 328 | return -EBUSY; |
@@ -365,30 +333,70 @@ cio_cancel (struct subchannel *sch) | |||
365 | } | 333 | } |
366 | } | 334 | } |
367 | 335 | ||
336 | |||
337 | static void cio_apply_config(struct subchannel *sch, struct schib *schib) | ||
338 | { | ||
339 | schib->pmcw.intparm = sch->config.intparm; | ||
340 | schib->pmcw.mbi = sch->config.mbi; | ||
341 | schib->pmcw.isc = sch->config.isc; | ||
342 | schib->pmcw.ena = sch->config.ena; | ||
343 | schib->pmcw.mme = sch->config.mme; | ||
344 | schib->pmcw.mp = sch->config.mp; | ||
345 | schib->pmcw.csense = sch->config.csense; | ||
346 | schib->pmcw.mbfc = sch->config.mbfc; | ||
347 | if (sch->config.mbfc) | ||
348 | schib->mba = sch->config.mba; | ||
349 | } | ||
350 | |||
351 | static int cio_check_config(struct subchannel *sch, struct schib *schib) | ||
352 | { | ||
353 | return (schib->pmcw.intparm == sch->config.intparm) && | ||
354 | (schib->pmcw.mbi == sch->config.mbi) && | ||
355 | (schib->pmcw.isc == sch->config.isc) && | ||
356 | (schib->pmcw.ena == sch->config.ena) && | ||
357 | (schib->pmcw.mme == sch->config.mme) && | ||
358 | (schib->pmcw.mp == sch->config.mp) && | ||
359 | (schib->pmcw.csense == sch->config.csense) && | ||
360 | (schib->pmcw.mbfc == sch->config.mbfc) && | ||
361 | (!sch->config.mbfc || (schib->mba == sch->config.mba)); | ||
362 | } | ||
363 | |||
368 | /* | 364 | /* |
369 | * Function: cio_modify | 365 | * cio_commit_config - apply configuration to the subchannel |
370 | * Issues a "Modify Subchannel" on the specified subchannel | ||
371 | */ | 366 | */ |
372 | int | 367 | int cio_commit_config(struct subchannel *sch) |
373 | cio_modify (struct subchannel *sch) | ||
374 | { | 368 | { |
375 | int ccode, retry, ret; | 369 | struct schib schib; |
370 | int ccode, retry, ret = 0; | ||
371 | |||
372 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | ||
373 | return -ENODEV; | ||
376 | 374 | ||
377 | ret = 0; | ||
378 | for (retry = 0; retry < 5; retry++) { | 375 | for (retry = 0; retry < 5; retry++) { |
379 | ccode = msch_err (sch->schid, &sch->schib); | 376 | /* copy desired changes to local schib */ |
380 | if (ccode < 0) /* -EIO if msch gets a program check. */ | 377 | cio_apply_config(sch, &schib); |
378 | ccode = msch_err(sch->schid, &schib); | ||
379 | if (ccode < 0) /* -EIO if msch gets a program check. */ | ||
381 | return ccode; | 380 | return ccode; |
382 | switch (ccode) { | 381 | switch (ccode) { |
383 | case 0: /* successfull */ | 382 | case 0: /* successfull */ |
384 | return 0; | 383 | if (stsch(sch->schid, &schib) || |
385 | case 1: /* status pending */ | 384 | !css_sch_is_valid(&schib)) |
385 | return -ENODEV; | ||
386 | if (cio_check_config(sch, &schib)) { | ||
387 | /* commit changes from local schib */ | ||
388 | memcpy(&sch->schib, &schib, sizeof(schib)); | ||
389 | return 0; | ||
390 | } | ||
391 | ret = -EAGAIN; | ||
392 | break; | ||
393 | case 1: /* status pending */ | ||
386 | return -EBUSY; | 394 | return -EBUSY; |
387 | case 2: /* busy */ | 395 | case 2: /* busy */ |
388 | udelay (100); /* allow for recovery */ | 396 | udelay(100); /* allow for recovery */ |
389 | ret = -EBUSY; | 397 | ret = -EBUSY; |
390 | break; | 398 | break; |
391 | case 3: /* not operational */ | 399 | case 3: /* not operational */ |
392 | return -ENODEV; | 400 | return -ENODEV; |
393 | } | 401 | } |
394 | } | 402 | } |
@@ -396,6 +404,23 @@ cio_modify (struct subchannel *sch) | |||
396 | } | 404 | } |
397 | 405 | ||
398 | /** | 406 | /** |
407 | * cio_update_schib - Perform stsch and update schib if subchannel is valid. | ||
408 | * @sch: subchannel on which to perform stsch | ||
409 | * Return zero on success, -ENODEV otherwise. | ||
410 | */ | ||
411 | int cio_update_schib(struct subchannel *sch) | ||
412 | { | ||
413 | struct schib schib; | ||
414 | |||
415 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | ||
416 | return -ENODEV; | ||
417 | |||
418 | memcpy(&sch->schib, &schib, sizeof(schib)); | ||
419 | return 0; | ||
420 | } | ||
421 | EXPORT_SYMBOL_GPL(cio_update_schib); | ||
422 | |||
423 | /** | ||
399 | * cio_enable_subchannel - enable a subchannel. | 424 | * cio_enable_subchannel - enable a subchannel. |
400 | * @sch: subchannel to be enabled | 425 | * @sch: subchannel to be enabled |
401 | * @intparm: interruption parameter to set | 426 | * @intparm: interruption parameter to set |
@@ -403,7 +428,6 @@ cio_modify (struct subchannel *sch) | |||
403 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 428 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
404 | { | 429 | { |
405 | char dbf_txt[15]; | 430 | char dbf_txt[15]; |
406 | int ccode; | ||
407 | int retry; | 431 | int retry; |
408 | int ret; | 432 | int ret; |
409 | 433 | ||
@@ -412,33 +436,27 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | |||
412 | 436 | ||
413 | if (sch_is_pseudo_sch(sch)) | 437 | if (sch_is_pseudo_sch(sch)) |
414 | return -EINVAL; | 438 | return -EINVAL; |
415 | ccode = stsch (sch->schid, &sch->schib); | 439 | if (cio_update_schib(sch)) |
416 | if (ccode) | ||
417 | return -ENODEV; | 440 | return -ENODEV; |
418 | 441 | ||
419 | for (retry = 5, ret = 0; retry > 0; retry--) { | 442 | sch->config.ena = 1; |
420 | sch->schib.pmcw.ena = 1; | 443 | sch->config.isc = sch->isc; |
421 | sch->schib.pmcw.isc = sch->isc; | 444 | sch->config.intparm = intparm; |
422 | sch->schib.pmcw.intparm = intparm; | 445 | |
423 | ret = cio_modify(sch); | 446 | for (retry = 0; retry < 3; retry++) { |
424 | if (ret == -ENODEV) | 447 | ret = cio_commit_config(sch); |
425 | break; | 448 | if (ret == -EIO) { |
426 | if (ret == -EIO) | ||
427 | /* | 449 | /* |
428 | * Got a program check in cio_modify. Try without | 450 | * Got a program check in msch. Try without |
429 | * the concurrent sense bit the next time. | 451 | * the concurrent sense bit the next time. |
430 | */ | 452 | */ |
431 | sch->schib.pmcw.csense = 0; | 453 | sch->config.csense = 0; |
432 | if (ret == 0) { | 454 | } else if (ret == -EBUSY) { |
433 | stsch (sch->schid, &sch->schib); | ||
434 | if (sch->schib.pmcw.ena) | ||
435 | break; | ||
436 | } | ||
437 | if (ret == -EBUSY) { | ||
438 | struct irb irb; | 455 | struct irb irb; |
439 | if (tsch(sch->schid, &irb) != 0) | 456 | if (tsch(sch->schid, &irb) != 0) |
440 | break; | 457 | break; |
441 | } | 458 | } else |
459 | break; | ||
442 | } | 460 | } |
443 | sprintf (dbf_txt, "ret:%d", ret); | 461 | sprintf (dbf_txt, "ret:%d", ret); |
444 | CIO_TRACE_EVENT (2, dbf_txt); | 462 | CIO_TRACE_EVENT (2, dbf_txt); |
@@ -453,8 +471,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); | |||
453 | int cio_disable_subchannel(struct subchannel *sch) | 471 | int cio_disable_subchannel(struct subchannel *sch) |
454 | { | 472 | { |
455 | char dbf_txt[15]; | 473 | char dbf_txt[15]; |
456 | int ccode; | ||
457 | int retry; | ||
458 | int ret; | 474 | int ret; |
459 | 475 | ||
460 | CIO_TRACE_EVENT (2, "dissch"); | 476 | CIO_TRACE_EVENT (2, "dissch"); |
@@ -462,8 +478,7 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
462 | 478 | ||
463 | if (sch_is_pseudo_sch(sch)) | 479 | if (sch_is_pseudo_sch(sch)) |
464 | return 0; | 480 | return 0; |
465 | ccode = stsch (sch->schid, &sch->schib); | 481 | if (cio_update_schib(sch)) |
466 | if (ccode == 3) /* Not operational. */ | ||
467 | return -ENODEV; | 482 | return -ENODEV; |
468 | 483 | ||
469 | if (scsw_actl(&sch->schib.scsw) != 0) | 484 | if (scsw_actl(&sch->schib.scsw) != 0) |
@@ -473,24 +488,9 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
473 | */ | 488 | */ |
474 | return -EBUSY; | 489 | return -EBUSY; |
475 | 490 | ||
476 | for (retry = 5, ret = 0; retry > 0; retry--) { | 491 | sch->config.ena = 0; |
477 | sch->schib.pmcw.ena = 0; | 492 | ret = cio_commit_config(sch); |
478 | ret = cio_modify(sch); | 493 | |
479 | if (ret == -ENODEV) | ||
480 | break; | ||
481 | if (ret == -EBUSY) | ||
482 | /* | ||
483 | * The subchannel is busy or status pending. | ||
484 | * We'll disable when the next interrupt was delivered | ||
485 | * via the state machine. | ||
486 | */ | ||
487 | break; | ||
488 | if (ret == 0) { | ||
489 | stsch (sch->schid, &sch->schib); | ||
490 | if (!sch->schib.pmcw.ena) | ||
491 | break; | ||
492 | } | ||
493 | } | ||
494 | sprintf (dbf_txt, "ret:%d", ret); | 494 | sprintf (dbf_txt, "ret:%d", ret); |
495 | CIO_TRACE_EVENT (2, dbf_txt); | 495 | CIO_TRACE_EVENT (2, dbf_txt); |
496 | return ret; | 496 | return ret; |
@@ -687,6 +687,43 @@ static char console_sch_name[10] = "0.x.xxxx"; | |||
687 | static struct io_subchannel_private console_priv; | 687 | static struct io_subchannel_private console_priv; |
688 | static int console_subchannel_in_use; | 688 | static int console_subchannel_in_use; |
689 | 689 | ||
690 | /* | ||
691 | * Use tpi to get a pending interrupt, call the interrupt handler and | ||
692 | * return a pointer to the subchannel structure. | ||
693 | */ | ||
694 | static int cio_tpi(void) | ||
695 | { | ||
696 | struct tpi_info *tpi_info; | ||
697 | struct subchannel *sch; | ||
698 | struct irb *irb; | ||
699 | int irq_context; | ||
700 | |||
701 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | ||
702 | if (tpi(NULL) != 1) | ||
703 | return 0; | ||
704 | irb = (struct irb *) __LC_IRB; | ||
705 | /* Store interrupt response block to lowcore. */ | ||
706 | if (tsch(tpi_info->schid, irb) != 0) | ||
707 | /* Not status pending or not operational. */ | ||
708 | return 1; | ||
709 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | ||
710 | if (!sch) | ||
711 | return 1; | ||
712 | irq_context = in_interrupt(); | ||
713 | if (!irq_context) | ||
714 | local_bh_disable(); | ||
715 | irq_enter(); | ||
716 | spin_lock(sch->lock); | ||
717 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | ||
718 | if (sch->driver && sch->driver->irq) | ||
719 | sch->driver->irq(sch); | ||
720 | spin_unlock(sch->lock); | ||
721 | irq_exit(); | ||
722 | if (!irq_context) | ||
723 | _local_bh_enable(); | ||
724 | return 1; | ||
725 | } | ||
726 | |||
690 | void *cio_get_console_priv(void) | 727 | void *cio_get_console_priv(void) |
691 | { | 728 | { |
692 | return &console_priv; | 729 | return &console_priv; |
@@ -780,7 +817,7 @@ cio_probe_console(void) | |||
780 | sch_no = cio_get_console_sch_no(); | 817 | sch_no = cio_get_console_sch_no(); |
781 | if (sch_no == -1) { | 818 | if (sch_no == -1) { |
782 | console_subchannel_in_use = 0; | 819 | console_subchannel_in_use = 0; |
783 | printk(KERN_WARNING "cio: No ccw console found!\n"); | 820 | pr_warning("No CCW console was found\n"); |
784 | return ERR_PTR(-ENODEV); | 821 | return ERR_PTR(-ENODEV); |
785 | } | 822 | } |
786 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | 823 | memset(&console_subchannel, 0, sizeof(struct subchannel)); |
@@ -796,10 +833,9 @@ cio_probe_console(void) | |||
796 | * enable console I/O-interrupt subclass | 833 | * enable console I/O-interrupt subclass |
797 | */ | 834 | */ |
798 | isc_register(CONSOLE_ISC); | 835 | isc_register(CONSOLE_ISC); |
799 | console_subchannel.schib.pmcw.isc = CONSOLE_ISC; | 836 | console_subchannel.config.isc = CONSOLE_ISC; |
800 | console_subchannel.schib.pmcw.intparm = | 837 | console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; |
801 | (u32)(addr_t)&console_subchannel; | 838 | ret = cio_commit_config(&console_subchannel); |
802 | ret = cio_modify(&console_subchannel); | ||
803 | if (ret) { | 839 | if (ret) { |
804 | isc_unregister(CONSOLE_ISC); | 840 | isc_unregister(CONSOLE_ISC); |
805 | console_subchannel_in_use = 0; | 841 | console_subchannel_in_use = 0; |
@@ -811,8 +847,8 @@ cio_probe_console(void) | |||
811 | void | 847 | void |
812 | cio_release_console(void) | 848 | cio_release_console(void) |
813 | { | 849 | { |
814 | console_subchannel.schib.pmcw.intparm = 0; | 850 | console_subchannel.config.intparm = 0; |
815 | cio_modify(&console_subchannel); | 851 | cio_commit_config(&console_subchannel); |
816 | isc_unregister(CONSOLE_ISC); | 852 | isc_unregister(CONSOLE_ISC); |
817 | console_subchannel_in_use = 0; | 853 | console_subchannel_in_use = 0; |
818 | } | 854 | } |
@@ -852,7 +888,8 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
852 | cc = msch(schid, schib); | 888 | cc = msch(schid, schib); |
853 | if (cc) | 889 | if (cc) |
854 | return (cc==3?-ENODEV:-EBUSY); | 890 | return (cc==3?-ENODEV:-EBUSY); |
855 | stsch(schid, schib); | 891 | if (stsch(schid, schib) || !css_sch_is_valid(schib)) |
892 | return -ENODEV; | ||
856 | if (!schib->pmcw.ena) | 893 | if (!schib->pmcw.ena) |
857 | return 0; | 894 | return 0; |
858 | } | 895 | } |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 0fb24784e925..5150fba742ac 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -45,6 +45,19 @@ struct pmcw { | |||
45 | /* ... in an operand exception. */ | 45 | /* ... in an operand exception. */ |
46 | } __attribute__ ((packed)); | 46 | } __attribute__ ((packed)); |
47 | 47 | ||
48 | /* Target SCHIB configuration. */ | ||
49 | struct schib_config { | ||
50 | u64 mba; | ||
51 | u32 intparm; | ||
52 | u16 mbi; | ||
53 | u32 isc:3; | ||
54 | u32 ena:1; | ||
55 | u32 mme:2; | ||
56 | u32 mp:1; | ||
57 | u32 csense:1; | ||
58 | u32 mbfc:1; | ||
59 | } __attribute__ ((packed)); | ||
60 | |||
48 | /* | 61 | /* |
49 | * subchannel information block | 62 | * subchannel information block |
50 | */ | 63 | */ |
@@ -82,6 +95,8 @@ struct subchannel { | |||
82 | struct device dev; /* entry in device tree */ | 95 | struct device dev; /* entry in device tree */ |
83 | struct css_driver *driver; | 96 | struct css_driver *driver; |
84 | void *private; /* private per subchannel type data */ | 97 | void *private; /* private per subchannel type data */ |
98 | struct work_struct work; | ||
99 | struct schib_config config; | ||
85 | } __attribute__ ((aligned(8))); | 100 | } __attribute__ ((aligned(8))); |
86 | 101 | ||
87 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ | 102 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ |
@@ -100,7 +115,8 @@ extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); | |||
100 | extern int cio_cancel (struct subchannel *); | 115 | extern int cio_cancel (struct subchannel *); |
101 | extern int cio_set_options (struct subchannel *, int); | 116 | extern int cio_set_options (struct subchannel *, int); |
102 | extern int cio_get_options (struct subchannel *); | 117 | extern int cio_get_options (struct subchannel *); |
103 | extern int cio_modify (struct subchannel *); | 118 | extern int cio_update_schib(struct subchannel *sch); |
119 | extern int cio_commit_config(struct subchannel *sch); | ||
104 | 120 | ||
105 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); | 121 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); |
106 | int cio_tm_intrg(struct subchannel *sch); | 122 | int cio_tm_intrg(struct subchannel *sch); |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index a90b28c0be57..dc98b2c63862 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -25,6 +25,9 @@ | |||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "cio" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/bootmem.h> | 31 | #include <linux/bootmem.h> |
29 | #include <linux/device.h> | 32 | #include <linux/device.h> |
30 | #include <linux/init.h> | 33 | #include <linux/init.h> |
@@ -185,56 +188,19 @@ static inline void cmf_activate(void *area, unsigned int onoff) | |||
185 | static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, | 188 | static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, |
186 | unsigned long address) | 189 | unsigned long address) |
187 | { | 190 | { |
188 | int ret; | ||
189 | int retry; | ||
190 | struct subchannel *sch; | 191 | struct subchannel *sch; |
191 | struct schib *schib; | ||
192 | 192 | ||
193 | sch = to_subchannel(cdev->dev.parent); | 193 | sch = to_subchannel(cdev->dev.parent); |
194 | schib = &sch->schib; | ||
195 | /* msch can silently fail, so do it again if necessary */ | ||
196 | for (retry = 0; retry < 3; retry++) { | ||
197 | /* prepare schib */ | ||
198 | stsch(sch->schid, schib); | ||
199 | schib->pmcw.mme = mme; | ||
200 | schib->pmcw.mbfc = mbfc; | ||
201 | /* address can be either a block address or a block index */ | ||
202 | if (mbfc) | ||
203 | schib->mba = address; | ||
204 | else | ||
205 | schib->pmcw.mbi = address; | ||
206 | |||
207 | /* try to submit it */ | ||
208 | switch(ret = msch_err(sch->schid, schib)) { | ||
209 | case 0: | ||
210 | break; | ||
211 | case 1: | ||
212 | case 2: /* in I/O or status pending */ | ||
213 | ret = -EBUSY; | ||
214 | break; | ||
215 | case 3: /* subchannel is no longer valid */ | ||
216 | ret = -ENODEV; | ||
217 | break; | ||
218 | default: /* msch caught an exception */ | ||
219 | ret = -EINVAL; | ||
220 | break; | ||
221 | } | ||
222 | stsch(sch->schid, schib); /* restore the schib */ | ||
223 | |||
224 | if (ret) | ||
225 | break; | ||
226 | 194 | ||
227 | /* check if it worked */ | 195 | sch->config.mme = mme; |
228 | if (schib->pmcw.mme == mme && | 196 | sch->config.mbfc = mbfc; |
229 | schib->pmcw.mbfc == mbfc && | 197 | /* address can be either a block address or a block index */ |
230 | (mbfc ? (schib->mba == address) | 198 | if (mbfc) |
231 | : (schib->pmcw.mbi == address))) | 199 | sch->config.mba = address; |
232 | return 0; | 200 | else |
201 | sch->config.mbi = address; | ||
233 | 202 | ||
234 | ret = -EINVAL; | 203 | return cio_commit_config(sch); |
235 | } | ||
236 | |||
237 | return ret; | ||
238 | } | 204 | } |
239 | 205 | ||
240 | struct set_schib_struct { | 206 | struct set_schib_struct { |
@@ -338,7 +304,7 @@ static int cmf_copy_block(struct ccw_device *cdev) | |||
338 | 304 | ||
339 | sch = to_subchannel(cdev->dev.parent); | 305 | sch = to_subchannel(cdev->dev.parent); |
340 | 306 | ||
341 | if (stsch(sch->schid, &sch->schib)) | 307 | if (cio_update_schib(sch)) |
342 | return -ENODEV; | 308 | return -ENODEV; |
343 | 309 | ||
344 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { | 310 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { |
@@ -1359,9 +1325,8 @@ static int __init init_cmf(void) | |||
1359 | default: | 1325 | default: |
1360 | return 1; | 1326 | return 1; |
1361 | } | 1327 | } |
1362 | 1328 | pr_info("Channel measurement facility initialized using format " | |
1363 | printk(KERN_INFO "cio: Channel measurement facility using %s " | 1329 | "%s (mode %s)\n", format_string, detect_string); |
1364 | "format (%s)\n", format_string, detect_string); | ||
1365 | return 0; | 1330 | return 0; |
1366 | } | 1331 | } |
1367 | 1332 | ||
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 76bbb1e74c29..8019288bc6de 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -6,6 +6,10 @@ | |||
6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | */ | 8 | */ |
9 | |||
10 | #define KMSG_COMPONENT "cio" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
9 | #include <linux/module.h> | 13 | #include <linux/module.h> |
10 | #include <linux/init.h> | 14 | #include <linux/init.h> |
11 | #include <linux/device.h> | 15 | #include <linux/device.h> |
@@ -128,8 +132,8 @@ css_free_subchannel(struct subchannel *sch) | |||
128 | { | 132 | { |
129 | if (sch) { | 133 | if (sch) { |
130 | /* Reset intparm to zeroes. */ | 134 | /* Reset intparm to zeroes. */ |
131 | sch->schib.pmcw.intparm = 0; | 135 | sch->config.intparm = 0; |
132 | cio_modify(sch); | 136 | cio_commit_config(sch); |
133 | kfree(sch->lock); | 137 | kfree(sch->lock); |
134 | kfree(sch); | 138 | kfree(sch); |
135 | } | 139 | } |
@@ -844,8 +848,8 @@ out: | |||
844 | s390_unregister_crw_handler(CRW_RSC_CSS); | 848 | s390_unregister_crw_handler(CRW_RSC_CSS); |
845 | chsc_free_sei_area(); | 849 | chsc_free_sei_area(); |
846 | kfree(slow_subchannel_set); | 850 | kfree(slow_subchannel_set); |
847 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", | 851 | pr_alert("The CSS device driver initialization failed with " |
848 | ret); | 852 | "errno=%d\n", ret); |
849 | return ret; | 853 | return ret; |
850 | } | 854 | } |
851 | 855 | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4e4008325e28..23d5752349b5 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -376,19 +376,23 @@ int ccw_device_set_offline(struct ccw_device *cdev) | |||
376 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | 376 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
377 | } | 377 | } |
378 | spin_unlock_irq(cdev->ccwlock); | 378 | spin_unlock_irq(cdev->ccwlock); |
379 | /* Give up reference from ccw_device_set_online(). */ | ||
380 | put_device(&cdev->dev); | ||
379 | return ret; | 381 | return ret; |
380 | } | 382 | } |
381 | spin_unlock_irq(cdev->ccwlock); | 383 | spin_unlock_irq(cdev->ccwlock); |
382 | if (ret == 0) | 384 | if (ret == 0) { |
383 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | 385 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); |
384 | else { | 386 | /* Give up reference from ccw_device_set_online(). */ |
387 | put_device(&cdev->dev); | ||
388 | } else { | ||
385 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " | 389 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " |
386 | "device 0.%x.%04x\n", | 390 | "device 0.%x.%04x\n", |
387 | ret, cdev->private->dev_id.ssid, | 391 | ret, cdev->private->dev_id.ssid, |
388 | cdev->private->dev_id.devno); | 392 | cdev->private->dev_id.devno); |
389 | cdev->online = 1; | 393 | cdev->online = 1; |
390 | } | 394 | } |
391 | return ret; | 395 | return ret; |
392 | } | 396 | } |
393 | 397 | ||
394 | /** | 398 | /** |
@@ -411,6 +415,9 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
411 | return -ENODEV; | 415 | return -ENODEV; |
412 | if (cdev->online || !cdev->drv) | 416 | if (cdev->online || !cdev->drv) |
413 | return -EINVAL; | 417 | return -EINVAL; |
418 | /* Hold on to an extra reference while device is online. */ | ||
419 | if (!get_device(&cdev->dev)) | ||
420 | return -ENODEV; | ||
414 | 421 | ||
415 | spin_lock_irq(cdev->ccwlock); | 422 | spin_lock_irq(cdev->ccwlock); |
416 | ret = ccw_device_online(cdev); | 423 | ret = ccw_device_online(cdev); |
@@ -422,10 +429,15 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
422 | "device 0.%x.%04x\n", | 429 | "device 0.%x.%04x\n", |
423 | ret, cdev->private->dev_id.ssid, | 430 | ret, cdev->private->dev_id.ssid, |
424 | cdev->private->dev_id.devno); | 431 | cdev->private->dev_id.devno); |
432 | /* Give up online reference since onlining failed. */ | ||
433 | put_device(&cdev->dev); | ||
425 | return ret; | 434 | return ret; |
426 | } | 435 | } |
427 | if (cdev->private->state != DEV_STATE_ONLINE) | 436 | if (cdev->private->state != DEV_STATE_ONLINE) { |
437 | /* Give up online reference since onlining failed. */ | ||
438 | put_device(&cdev->dev); | ||
428 | return -ENODEV; | 439 | return -ENODEV; |
440 | } | ||
429 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { | 441 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { |
430 | cdev->online = 1; | 442 | cdev->online = 1; |
431 | return 0; | 443 | return 0; |
@@ -440,6 +452,8 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
440 | "device 0.%x.%04x\n", | 452 | "device 0.%x.%04x\n", |
441 | ret, cdev->private->dev_id.ssid, | 453 | ret, cdev->private->dev_id.ssid, |
442 | cdev->private->dev_id.devno); | 454 | cdev->private->dev_id.devno); |
455 | /* Give up online reference since onlining failed. */ | ||
456 | put_device(&cdev->dev); | ||
443 | return (ret == 0) ? -ENODEV : ret; | 457 | return (ret == 0) ? -ENODEV : ret; |
444 | } | 458 | } |
445 | 459 | ||
@@ -704,6 +718,8 @@ ccw_device_release(struct device *dev) | |||
704 | struct ccw_device *cdev; | 718 | struct ccw_device *cdev; |
705 | 719 | ||
706 | cdev = to_ccwdev(dev); | 720 | cdev = to_ccwdev(dev); |
721 | /* Release reference of parent subchannel. */ | ||
722 | put_device(cdev->dev.parent); | ||
707 | kfree(cdev->private); | 723 | kfree(cdev->private); |
708 | kfree(cdev); | 724 | kfree(cdev); |
709 | } | 725 | } |
@@ -735,8 +751,8 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
735 | /* Do first half of device_register. */ | 751 | /* Do first half of device_register. */ |
736 | device_initialize(&cdev->dev); | 752 | device_initialize(&cdev->dev); |
737 | if (!get_device(&sch->dev)) { | 753 | if (!get_device(&sch->dev)) { |
738 | if (cdev->dev.release) | 754 | /* Release reference from device_initialize(). */ |
739 | cdev->dev.release(&cdev->dev); | 755 | put_device(&cdev->dev); |
740 | return -ENODEV; | 756 | return -ENODEV; |
741 | } | 757 | } |
742 | return 0; | 758 | return 0; |
@@ -778,37 +794,55 @@ static void sch_attach_disconnected_device(struct subchannel *sch, | |||
778 | struct subchannel *other_sch; | 794 | struct subchannel *other_sch; |
779 | int ret; | 795 | int ret; |
780 | 796 | ||
781 | other_sch = to_subchannel(get_device(cdev->dev.parent)); | 797 | /* Get reference for new parent. */ |
798 | if (!get_device(&sch->dev)) | ||
799 | return; | ||
800 | other_sch = to_subchannel(cdev->dev.parent); | ||
801 | /* Note: device_move() changes cdev->dev.parent */ | ||
782 | ret = device_move(&cdev->dev, &sch->dev); | 802 | ret = device_move(&cdev->dev, &sch->dev); |
783 | if (ret) { | 803 | if (ret) { |
784 | CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " | 804 | CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " |
785 | "(ret=%d)!\n", cdev->private->dev_id.ssid, | 805 | "(ret=%d)!\n", cdev->private->dev_id.ssid, |
786 | cdev->private->dev_id.devno, ret); | 806 | cdev->private->dev_id.devno, ret); |
787 | put_device(&other_sch->dev); | 807 | /* Put reference for new parent. */ |
808 | put_device(&sch->dev); | ||
788 | return; | 809 | return; |
789 | } | 810 | } |
790 | sch_set_cdev(other_sch, NULL); | 811 | sch_set_cdev(other_sch, NULL); |
791 | /* No need to keep a subchannel without ccw device around. */ | 812 | /* No need to keep a subchannel without ccw device around. */ |
792 | css_sch_device_unregister(other_sch); | 813 | css_sch_device_unregister(other_sch); |
793 | put_device(&other_sch->dev); | ||
794 | sch_attach_device(sch, cdev); | 814 | sch_attach_device(sch, cdev); |
815 | /* Put reference for old parent. */ | ||
816 | put_device(&other_sch->dev); | ||
795 | } | 817 | } |
796 | 818 | ||
797 | static void sch_attach_orphaned_device(struct subchannel *sch, | 819 | static void sch_attach_orphaned_device(struct subchannel *sch, |
798 | struct ccw_device *cdev) | 820 | struct ccw_device *cdev) |
799 | { | 821 | { |
800 | int ret; | 822 | int ret; |
823 | struct subchannel *pseudo_sch; | ||
801 | 824 | ||
802 | /* Try to move the ccw device to its new subchannel. */ | 825 | /* Get reference for new parent. */ |
826 | if (!get_device(&sch->dev)) | ||
827 | return; | ||
828 | pseudo_sch = to_subchannel(cdev->dev.parent); | ||
829 | /* | ||
830 | * Try to move the ccw device to its new subchannel. | ||
831 | * Note: device_move() changes cdev->dev.parent | ||
832 | */ | ||
803 | ret = device_move(&cdev->dev, &sch->dev); | 833 | ret = device_move(&cdev->dev, &sch->dev); |
804 | if (ret) { | 834 | if (ret) { |
805 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " | 835 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " |
806 | "failed (ret=%d)!\n", | 836 | "failed (ret=%d)!\n", |
807 | cdev->private->dev_id.ssid, | 837 | cdev->private->dev_id.ssid, |
808 | cdev->private->dev_id.devno, ret); | 838 | cdev->private->dev_id.devno, ret); |
839 | /* Put reference for new parent. */ | ||
840 | put_device(&sch->dev); | ||
809 | return; | 841 | return; |
810 | } | 842 | } |
811 | sch_attach_device(sch, cdev); | 843 | sch_attach_device(sch, cdev); |
844 | /* Put reference on pseudo subchannel. */ | ||
845 | put_device(&pseudo_sch->dev); | ||
812 | } | 846 | } |
813 | 847 | ||
814 | static void sch_create_and_recog_new_device(struct subchannel *sch) | 848 | static void sch_create_and_recog_new_device(struct subchannel *sch) |
@@ -830,9 +864,11 @@ static void sch_create_and_recog_new_device(struct subchannel *sch) | |||
830 | spin_lock_irq(sch->lock); | 864 | spin_lock_irq(sch->lock); |
831 | sch_set_cdev(sch, NULL); | 865 | sch_set_cdev(sch, NULL); |
832 | spin_unlock_irq(sch->lock); | 866 | spin_unlock_irq(sch->lock); |
833 | if (cdev->dev.release) | ||
834 | cdev->dev.release(&cdev->dev); | ||
835 | css_sch_device_unregister(sch); | 867 | css_sch_device_unregister(sch); |
868 | /* Put reference from io_subchannel_create_ccwdev(). */ | ||
869 | put_device(&sch->dev); | ||
870 | /* Give up initial reference. */ | ||
871 | put_device(&cdev->dev); | ||
836 | } | 872 | } |
837 | } | 873 | } |
838 | 874 | ||
@@ -854,15 +890,20 @@ void ccw_device_move_to_orphanage(struct work_struct *work) | |||
854 | dev_id.devno = sch->schib.pmcw.dev; | 890 | dev_id.devno = sch->schib.pmcw.dev; |
855 | dev_id.ssid = sch->schid.ssid; | 891 | dev_id.ssid = sch->schid.ssid; |
856 | 892 | ||
893 | /* Increase refcount for pseudo subchannel. */ | ||
894 | get_device(&css->pseudo_subchannel->dev); | ||
857 | /* | 895 | /* |
858 | * Move the orphaned ccw device to the orphanage so the replacing | 896 | * Move the orphaned ccw device to the orphanage so the replacing |
859 | * ccw device can take its place on the subchannel. | 897 | * ccw device can take its place on the subchannel. |
898 | * Note: device_move() changes cdev->dev.parent | ||
860 | */ | 899 | */ |
861 | ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); | 900 | ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); |
862 | if (ret) { | 901 | if (ret) { |
863 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " | 902 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " |
864 | "(ret=%d)!\n", cdev->private->dev_id.ssid, | 903 | "(ret=%d)!\n", cdev->private->dev_id.ssid, |
865 | cdev->private->dev_id.devno, ret); | 904 | cdev->private->dev_id.devno, ret); |
905 | /* Decrease refcount for pseudo subchannel again. */ | ||
906 | put_device(&css->pseudo_subchannel->dev); | ||
866 | return; | 907 | return; |
867 | } | 908 | } |
868 | cdev->ccwlock = css->pseudo_subchannel->lock; | 909 | cdev->ccwlock = css->pseudo_subchannel->lock; |
@@ -875,17 +916,23 @@ void ccw_device_move_to_orphanage(struct work_struct *work) | |||
875 | if (replacing_cdev) { | 916 | if (replacing_cdev) { |
876 | sch_attach_disconnected_device(sch, replacing_cdev); | 917 | sch_attach_disconnected_device(sch, replacing_cdev); |
877 | /* Release reference from get_disc_ccwdev_by_dev_id() */ | 918 | /* Release reference from get_disc_ccwdev_by_dev_id() */ |
878 | put_device(&cdev->dev); | 919 | put_device(&replacing_cdev->dev); |
920 | /* Release reference of subchannel from old cdev. */ | ||
921 | put_device(&sch->dev); | ||
879 | return; | 922 | return; |
880 | } | 923 | } |
881 | replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); | 924 | replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); |
882 | if (replacing_cdev) { | 925 | if (replacing_cdev) { |
883 | sch_attach_orphaned_device(sch, replacing_cdev); | 926 | sch_attach_orphaned_device(sch, replacing_cdev); |
884 | /* Release reference from get_orphaned_ccwdev_by_dev_id() */ | 927 | /* Release reference from get_orphaned_ccwdev_by_dev_id() */ |
885 | put_device(&cdev->dev); | 928 | put_device(&replacing_cdev->dev); |
929 | /* Release reference of subchannel from old cdev. */ | ||
930 | put_device(&sch->dev); | ||
886 | return; | 931 | return; |
887 | } | 932 | } |
888 | sch_create_and_recog_new_device(sch); | 933 | sch_create_and_recog_new_device(sch); |
934 | /* Release reference of subchannel from old cdev. */ | ||
935 | put_device(&sch->dev); | ||
889 | } | 936 | } |
890 | 937 | ||
891 | /* | 938 | /* |
@@ -903,6 +950,14 @@ io_subchannel_register(struct work_struct *work) | |||
903 | priv = container_of(work, struct ccw_device_private, kick_work); | 950 | priv = container_of(work, struct ccw_device_private, kick_work); |
904 | cdev = priv->cdev; | 951 | cdev = priv->cdev; |
905 | sch = to_subchannel(cdev->dev.parent); | 952 | sch = to_subchannel(cdev->dev.parent); |
953 | /* | ||
954 | * Check if subchannel is still registered. It may have become | ||
955 | * unregistered if a machine check hit us after finishing | ||
956 | * device recognition but before the register work could be | ||
957 | * queued. | ||
958 | */ | ||
959 | if (!device_is_registered(&sch->dev)) | ||
960 | goto out_err; | ||
906 | css_update_ssd_info(sch); | 961 | css_update_ssd_info(sch); |
907 | /* | 962 | /* |
908 | * io_subchannel_register() will also be called after device | 963 | * io_subchannel_register() will also be called after device |
@@ -910,7 +965,7 @@ io_subchannel_register(struct work_struct *work) | |||
910 | * be registered). We need to reprobe since we may now have sense id | 965 | * be registered). We need to reprobe since we may now have sense id |
911 | * information. | 966 | * information. |
912 | */ | 967 | */ |
913 | if (klist_node_attached(&cdev->dev.knode_parent)) { | 968 | if (device_is_registered(&cdev->dev)) { |
914 | if (!cdev->drv) { | 969 | if (!cdev->drv) { |
915 | ret = device_reprobe(&cdev->dev); | 970 | ret = device_reprobe(&cdev->dev); |
916 | if (ret) | 971 | if (ret) |
@@ -934,22 +989,19 @@ io_subchannel_register(struct work_struct *work) | |||
934 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", | 989 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", |
935 | cdev->private->dev_id.ssid, | 990 | cdev->private->dev_id.ssid, |
936 | cdev->private->dev_id.devno, ret); | 991 | cdev->private->dev_id.devno, ret); |
937 | put_device(&cdev->dev); | ||
938 | spin_lock_irqsave(sch->lock, flags); | 992 | spin_lock_irqsave(sch->lock, flags); |
939 | sch_set_cdev(sch, NULL); | 993 | sch_set_cdev(sch, NULL); |
940 | spin_unlock_irqrestore(sch->lock, flags); | 994 | spin_unlock_irqrestore(sch->lock, flags); |
941 | kfree (cdev->private); | 995 | /* Release initial device reference. */ |
942 | kfree (cdev); | 996 | put_device(&cdev->dev); |
943 | put_device(&sch->dev); | 997 | goto out_err; |
944 | if (atomic_dec_and_test(&ccw_device_init_count)) | ||
945 | wake_up(&ccw_device_init_wq); | ||
946 | return; | ||
947 | } | 998 | } |
948 | put_device(&cdev->dev); | ||
949 | out: | 999 | out: |
950 | cdev->private->flags.recog_done = 1; | 1000 | cdev->private->flags.recog_done = 1; |
951 | put_device(&sch->dev); | ||
952 | wake_up(&cdev->private->wait_q); | 1001 | wake_up(&cdev->private->wait_q); |
1002 | out_err: | ||
1003 | /* Release reference for workqueue processing. */ | ||
1004 | put_device(&cdev->dev); | ||
953 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1005 | if (atomic_dec_and_test(&ccw_device_init_count)) |
954 | wake_up(&ccw_device_init_wq); | 1006 | wake_up(&ccw_device_init_wq); |
955 | } | 1007 | } |
@@ -968,8 +1020,8 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) | |||
968 | sch = to_subchannel(cdev->dev.parent); | 1020 | sch = to_subchannel(cdev->dev.parent); |
969 | css_sch_device_unregister(sch); | 1021 | css_sch_device_unregister(sch); |
970 | /* Reset intparm to zeroes. */ | 1022 | /* Reset intparm to zeroes. */ |
971 | sch->schib.pmcw.intparm = 0; | 1023 | sch->config.intparm = 0; |
972 | cio_modify(sch); | 1024 | cio_commit_config(sch); |
973 | /* Release cdev reference for workqueue processing.*/ | 1025 | /* Release cdev reference for workqueue processing.*/ |
974 | put_device(&cdev->dev); | 1026 | put_device(&cdev->dev); |
975 | /* Release subchannel reference for local processing. */ | 1027 | /* Release subchannel reference for local processing. */ |
@@ -998,8 +1050,6 @@ io_subchannel_recog_done(struct ccw_device *cdev) | |||
998 | PREPARE_WORK(&cdev->private->kick_work, | 1050 | PREPARE_WORK(&cdev->private->kick_work, |
999 | ccw_device_call_sch_unregister); | 1051 | ccw_device_call_sch_unregister); |
1000 | queue_work(slow_path_wq, &cdev->private->kick_work); | 1052 | queue_work(slow_path_wq, &cdev->private->kick_work); |
1001 | /* Release subchannel reference for asynchronous recognition. */ | ||
1002 | put_device(&sch->dev); | ||
1003 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1053 | if (atomic_dec_and_test(&ccw_device_init_count)) |
1004 | wake_up(&ccw_device_init_wq); | 1054 | wake_up(&ccw_device_init_wq); |
1005 | break; | 1055 | break; |
@@ -1070,10 +1120,15 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1070 | priv = container_of(work, struct ccw_device_private, kick_work); | 1120 | priv = container_of(work, struct ccw_device_private, kick_work); |
1071 | sch = priv->sch; | 1121 | sch = priv->sch; |
1072 | cdev = priv->cdev; | 1122 | cdev = priv->cdev; |
1073 | former_parent = ccw_device_is_orphan(cdev) ? | 1123 | former_parent = to_subchannel(cdev->dev.parent); |
1074 | NULL : to_subchannel(get_device(cdev->dev.parent)); | 1124 | /* Get reference for new parent. */ |
1125 | if (!get_device(&sch->dev)) | ||
1126 | return; | ||
1075 | mutex_lock(&sch->reg_mutex); | 1127 | mutex_lock(&sch->reg_mutex); |
1076 | /* Try to move the ccw device to its new subchannel. */ | 1128 | /* |
1129 | * Try to move the ccw device to its new subchannel. | ||
1130 | * Note: device_move() changes cdev->dev.parent | ||
1131 | */ | ||
1077 | rc = device_move(&cdev->dev, &sch->dev); | 1132 | rc = device_move(&cdev->dev, &sch->dev); |
1078 | mutex_unlock(&sch->reg_mutex); | 1133 | mutex_unlock(&sch->reg_mutex); |
1079 | if (rc) { | 1134 | if (rc) { |
@@ -1083,21 +1138,23 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1083 | cdev->private->dev_id.devno, sch->schid.ssid, | 1138 | cdev->private->dev_id.devno, sch->schid.ssid, |
1084 | sch->schid.sch_no, rc); | 1139 | sch->schid.sch_no, rc); |
1085 | css_sch_device_unregister(sch); | 1140 | css_sch_device_unregister(sch); |
1141 | /* Put reference for new parent again. */ | ||
1142 | put_device(&sch->dev); | ||
1086 | goto out; | 1143 | goto out; |
1087 | } | 1144 | } |
1088 | if (former_parent) { | 1145 | if (!sch_is_pseudo_sch(former_parent)) { |
1089 | spin_lock_irq(former_parent->lock); | 1146 | spin_lock_irq(former_parent->lock); |
1090 | sch_set_cdev(former_parent, NULL); | 1147 | sch_set_cdev(former_parent, NULL); |
1091 | spin_unlock_irq(former_parent->lock); | 1148 | spin_unlock_irq(former_parent->lock); |
1092 | css_sch_device_unregister(former_parent); | 1149 | css_sch_device_unregister(former_parent); |
1093 | /* Reset intparm to zeroes. */ | 1150 | /* Reset intparm to zeroes. */ |
1094 | former_parent->schib.pmcw.intparm = 0; | 1151 | former_parent->config.intparm = 0; |
1095 | cio_modify(former_parent); | 1152 | cio_commit_config(former_parent); |
1096 | } | 1153 | } |
1097 | sch_attach_device(sch, cdev); | 1154 | sch_attach_device(sch, cdev); |
1098 | out: | 1155 | out: |
1099 | if (former_parent) | 1156 | /* Put reference for old parent. */ |
1100 | put_device(&former_parent->dev); | 1157 | put_device(&former_parent->dev); |
1101 | put_device(&cdev->dev); | 1158 | put_device(&cdev->dev); |
1102 | } | 1159 | } |
1103 | 1160 | ||
@@ -1113,6 +1170,15 @@ static void io_subchannel_irq(struct subchannel *sch) | |||
1113 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | 1170 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
1114 | } | 1171 | } |
1115 | 1172 | ||
1173 | void io_subchannel_init_config(struct subchannel *sch) | ||
1174 | { | ||
1175 | memset(&sch->config, 0, sizeof(sch->config)); | ||
1176 | sch->config.csense = 1; | ||
1177 | /* Use subchannel mp mode when there is more than 1 installed CHPID. */ | ||
1178 | if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0) | ||
1179 | sch->config.mp = 1; | ||
1180 | } | ||
1181 | |||
1116 | static void io_subchannel_init_fields(struct subchannel *sch) | 1182 | static void io_subchannel_init_fields(struct subchannel *sch) |
1117 | { | 1183 | { |
1118 | if (cio_is_console(sch->schid)) | 1184 | if (cio_is_console(sch->schid)) |
@@ -1127,18 +1193,34 @@ static void io_subchannel_init_fields(struct subchannel *sch) | |||
1127 | sch->schib.pmcw.dev, sch->schid.ssid, | 1193 | sch->schib.pmcw.dev, sch->schid.ssid, |
1128 | sch->schid.sch_no, sch->schib.pmcw.pim, | 1194 | sch->schid.sch_no, sch->schib.pmcw.pim, |
1129 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | 1195 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); |
1130 | /* Initially set up some fields in the pmcw. */ | 1196 | |
1131 | sch->schib.pmcw.ena = 0; | 1197 | io_subchannel_init_config(sch); |
1132 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
1133 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
1134 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
1135 | /* clean up possible residual cmf stuff */ | ||
1136 | sch->schib.pmcw.mme = 0; | ||
1137 | sch->schib.pmcw.mbfc = 0; | ||
1138 | sch->schib.pmcw.mbi = 0; | ||
1139 | sch->schib.mba = 0; | ||
1140 | } | 1198 | } |
1141 | 1199 | ||
1200 | static void io_subchannel_do_unreg(struct work_struct *work) | ||
1201 | { | ||
1202 | struct subchannel *sch; | ||
1203 | |||
1204 | sch = container_of(work, struct subchannel, work); | ||
1205 | css_sch_device_unregister(sch); | ||
1206 | /* Reset intparm to zeroes. */ | ||
1207 | sch->config.intparm = 0; | ||
1208 | cio_commit_config(sch); | ||
1209 | put_device(&sch->dev); | ||
1210 | } | ||
1211 | |||
1212 | /* Schedule unregister if we have no cdev. */ | ||
1213 | static void io_subchannel_schedule_removal(struct subchannel *sch) | ||
1214 | { | ||
1215 | get_device(&sch->dev); | ||
1216 | INIT_WORK(&sch->work, io_subchannel_do_unreg); | ||
1217 | queue_work(slow_path_wq, &sch->work); | ||
1218 | } | ||
1219 | |||
1220 | /* | ||
1221 | * Note: We always return 0 so that we bind to the device even on error. | ||
1222 | * This is needed so that our remove function is called on unregister. | ||
1223 | */ | ||
1142 | static int io_subchannel_probe(struct subchannel *sch) | 1224 | static int io_subchannel_probe(struct subchannel *sch) |
1143 | { | 1225 | { |
1144 | struct ccw_device *cdev; | 1226 | struct ccw_device *cdev; |
@@ -1168,9 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1168 | ccw_device_register(cdev); | 1250 | ccw_device_register(cdev); |
1169 | /* | 1251 | /* |
1170 | * Check if the device is already online. If it is | 1252 | * Check if the device is already online. If it is |
1171 | * the reference count needs to be corrected | 1253 | * the reference count needs to be corrected since we |
1172 | * (see ccw_device_online and css_init_done for the | 1254 | * didn't obtain a reference in ccw_device_set_online. |
1173 | * ugly details). | ||
1174 | */ | 1255 | */ |
1175 | if (cdev->private->state != DEV_STATE_NOT_OPER && | 1256 | if (cdev->private->state != DEV_STATE_NOT_OPER && |
1176 | cdev->private->state != DEV_STATE_OFFLINE && | 1257 | cdev->private->state != DEV_STATE_OFFLINE && |
@@ -1179,23 +1260,24 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1179 | return 0; | 1260 | return 0; |
1180 | } | 1261 | } |
1181 | io_subchannel_init_fields(sch); | 1262 | io_subchannel_init_fields(sch); |
1182 | /* | 1263 | rc = cio_commit_config(sch); |
1183 | * First check if a fitting device may be found amongst the | 1264 | if (rc) |
1184 | * disconnected devices or in the orphanage. | 1265 | goto out_schedule; |
1185 | */ | ||
1186 | dev_id.devno = sch->schib.pmcw.dev; | ||
1187 | dev_id.ssid = sch->schid.ssid; | ||
1188 | rc = sysfs_create_group(&sch->dev.kobj, | 1266 | rc = sysfs_create_group(&sch->dev.kobj, |
1189 | &io_subchannel_attr_group); | 1267 | &io_subchannel_attr_group); |
1190 | if (rc) | 1268 | if (rc) |
1191 | return rc; | 1269 | goto out_schedule; |
1192 | /* Allocate I/O subchannel private data. */ | 1270 | /* Allocate I/O subchannel private data. */ |
1193 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | 1271 | sch->private = kzalloc(sizeof(struct io_subchannel_private), |
1194 | GFP_KERNEL | GFP_DMA); | 1272 | GFP_KERNEL | GFP_DMA); |
1195 | if (!sch->private) { | 1273 | if (!sch->private) |
1196 | rc = -ENOMEM; | ||
1197 | goto out_err; | 1274 | goto out_err; |
1198 | } | 1275 | /* |
1276 | * First check if a fitting device may be found amongst the | ||
1277 | * disconnected devices or in the orphanage. | ||
1278 | */ | ||
1279 | dev_id.devno = sch->schib.pmcw.dev; | ||
1280 | dev_id.ssid = sch->schid.ssid; | ||
1199 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); | 1281 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); |
1200 | if (!cdev) | 1282 | if (!cdev) |
1201 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), | 1283 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), |
@@ -1213,24 +1295,21 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1213 | return 0; | 1295 | return 0; |
1214 | } | 1296 | } |
1215 | cdev = io_subchannel_create_ccwdev(sch); | 1297 | cdev = io_subchannel_create_ccwdev(sch); |
1216 | if (IS_ERR(cdev)) { | 1298 | if (IS_ERR(cdev)) |
1217 | rc = PTR_ERR(cdev); | ||
1218 | goto out_err; | 1299 | goto out_err; |
1219 | } | ||
1220 | rc = io_subchannel_recog(cdev, sch); | 1300 | rc = io_subchannel_recog(cdev, sch); |
1221 | if (rc) { | 1301 | if (rc) { |
1222 | spin_lock_irqsave(sch->lock, flags); | 1302 | spin_lock_irqsave(sch->lock, flags); |
1223 | sch_set_cdev(sch, NULL); | 1303 | io_subchannel_recog_done(cdev); |
1224 | spin_unlock_irqrestore(sch->lock, flags); | 1304 | spin_unlock_irqrestore(sch->lock, flags); |
1225 | if (cdev->dev.release) | ||
1226 | cdev->dev.release(&cdev->dev); | ||
1227 | goto out_err; | ||
1228 | } | 1305 | } |
1229 | return 0; | 1306 | return 0; |
1230 | out_err: | 1307 | out_err: |
1231 | kfree(sch->private); | 1308 | kfree(sch->private); |
1232 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | 1309 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); |
1233 | return rc; | 1310 | out_schedule: |
1311 | io_subchannel_schedule_removal(sch); | ||
1312 | return 0; | ||
1234 | } | 1313 | } |
1235 | 1314 | ||
1236 | static int | 1315 | static int |
@@ -1275,10 +1354,7 @@ static void io_subchannel_verify(struct subchannel *sch) | |||
1275 | 1354 | ||
1276 | static int check_for_io_on_path(struct subchannel *sch, int mask) | 1355 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
1277 | { | 1356 | { |
1278 | int cc; | 1357 | if (cio_update_schib(sch)) |
1279 | |||
1280 | cc = stsch(sch->schid, &sch->schib); | ||
1281 | if (cc) | ||
1282 | return 0; | 1358 | return 0; |
1283 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) | 1359 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) |
1284 | return 1; | 1360 | return 1; |
@@ -1347,15 +1423,13 @@ static int io_subchannel_chp_event(struct subchannel *sch, | |||
1347 | io_subchannel_verify(sch); | 1423 | io_subchannel_verify(sch); |
1348 | break; | 1424 | break; |
1349 | case CHP_OFFLINE: | 1425 | case CHP_OFFLINE: |
1350 | if (stsch(sch->schid, &sch->schib)) | 1426 | if (cio_update_schib(sch)) |
1351 | return -ENXIO; | ||
1352 | if (!css_sch_is_valid(&sch->schib)) | ||
1353 | return -ENODEV; | 1427 | return -ENODEV; |
1354 | io_subchannel_terminate_path(sch, mask); | 1428 | io_subchannel_terminate_path(sch, mask); |
1355 | break; | 1429 | break; |
1356 | case CHP_ONLINE: | 1430 | case CHP_ONLINE: |
1357 | if (stsch(sch->schid, &sch->schib)) | 1431 | if (cio_update_schib(sch)) |
1358 | return -ENXIO; | 1432 | return -ENODEV; |
1359 | sch->lpm |= mask & sch->opm; | 1433 | sch->lpm |= mask & sch->opm; |
1360 | io_subchannel_verify(sch); | 1434 | io_subchannel_verify(sch); |
1361 | break; | 1435 | break; |
@@ -1610,8 +1684,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1610 | spin_lock_irqsave(sch->lock, flags); | 1684 | spin_lock_irqsave(sch->lock, flags); |
1611 | 1685 | ||
1612 | /* Reset intparm to zeroes. */ | 1686 | /* Reset intparm to zeroes. */ |
1613 | sch->schib.pmcw.intparm = 0; | 1687 | sch->config.intparm = 0; |
1614 | cio_modify(sch); | 1688 | cio_commit_config(sch); |
1615 | break; | 1689 | break; |
1616 | case REPROBE: | 1690 | case REPROBE: |
1617 | ccw_device_trigger_reprobe(cdev); | 1691 | ccw_device_trigger_reprobe(cdev); |
@@ -1652,6 +1726,9 @@ static int ccw_device_console_enable(struct ccw_device *cdev, | |||
1652 | sch->private = cio_get_console_priv(); | 1726 | sch->private = cio_get_console_priv(); |
1653 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); | 1727 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); |
1654 | io_subchannel_init_fields(sch); | 1728 | io_subchannel_init_fields(sch); |
1729 | rc = cio_commit_config(sch); | ||
1730 | if (rc) | ||
1731 | return rc; | ||
1655 | sch->driver = &io_subchannel_driver; | 1732 | sch->driver = &io_subchannel_driver; |
1656 | /* Initialize the ccw_device structure. */ | 1733 | /* Initialize the ccw_device structure. */ |
1657 | cdev->dev.parent= &sch->dev; | 1734 | cdev->dev.parent= &sch->dev; |
@@ -1723,7 +1800,7 @@ __ccwdev_check_busid(struct device *dev, void *id) | |||
1723 | 1800 | ||
1724 | bus_id = id; | 1801 | bus_id = id; |
1725 | 1802 | ||
1726 | return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0); | 1803 | return (strcmp(bus_id, dev_name(dev)) == 0); |
1727 | } | 1804 | } |
1728 | 1805 | ||
1729 | 1806 | ||
@@ -1806,6 +1883,8 @@ ccw_device_remove (struct device *dev) | |||
1806 | "device 0.%x.%04x\n", | 1883 | "device 0.%x.%04x\n", |
1807 | ret, cdev->private->dev_id.ssid, | 1884 | ret, cdev->private->dev_id.ssid, |
1808 | cdev->private->dev_id.devno); | 1885 | cdev->private->dev_id.devno); |
1886 | /* Give up reference obtained in ccw_device_set_online(). */ | ||
1887 | put_device(&cdev->dev); | ||
1809 | } | 1888 | } |
1810 | ccw_device_set_timeout(cdev, 0); | 1889 | ccw_device_set_timeout(cdev, 0); |
1811 | cdev->drv = NULL; | 1890 | cdev->drv = NULL; |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 104ed669db43..0f2e63ea48de 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -76,6 +76,7 @@ extern wait_queue_head_t ccw_device_init_wq; | |||
76 | extern atomic_t ccw_device_init_count; | 76 | extern atomic_t ccw_device_init_count; |
77 | 77 | ||
78 | void io_subchannel_recog_done(struct ccw_device *cdev); | 78 | void io_subchannel_recog_done(struct ccw_device *cdev); |
79 | void io_subchannel_init_config(struct subchannel *sch); | ||
79 | 80 | ||
80 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 81 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
81 | 82 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 10bc03940fb3..8df5eaafc5ab 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -140,8 +140,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
140 | int ret; | 140 | int ret; |
141 | 141 | ||
142 | sch = to_subchannel(cdev->dev.parent); | 142 | sch = to_subchannel(cdev->dev.parent); |
143 | ret = stsch(sch->schid, &sch->schib); | 143 | if (cio_update_schib(sch)) |
144 | if (ret || !sch->schib.pmcw.dnv) | ||
145 | return -ENODEV; | 144 | return -ENODEV; |
146 | if (!sch->schib.pmcw.ena) | 145 | if (!sch->schib.pmcw.ena) |
147 | /* Not operational -> done. */ | 146 | /* Not operational -> done. */ |
@@ -245,11 +244,13 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
245 | * through ssch() and the path information is up to date. | 244 | * through ssch() and the path information is up to date. |
246 | */ | 245 | */ |
247 | old_lpm = sch->lpm; | 246 | old_lpm = sch->lpm; |
248 | stsch(sch->schid, &sch->schib); | 247 | |
249 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
250 | /* Check since device may again have become not operational. */ | 248 | /* Check since device may again have become not operational. */ |
251 | if (!sch->schib.pmcw.dnv) | 249 | if (cio_update_schib(sch)) |
252 | state = DEV_STATE_NOT_OPER; | 250 | state = DEV_STATE_NOT_OPER; |
251 | else | ||
252 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
253 | |||
253 | if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) | 254 | if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) |
254 | /* Force reprobe on all chpids. */ | 255 | /* Force reprobe on all chpids. */ |
255 | old_lpm = 0; | 256 | old_lpm = 0; |
@@ -399,9 +400,6 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
399 | ccw_device_oper_notify(cdev); | 400 | ccw_device_oper_notify(cdev); |
400 | } | 401 | } |
401 | wake_up(&cdev->private->wait_q); | 402 | wake_up(&cdev->private->wait_q); |
402 | |||
403 | if (css_init_done && state != DEV_STATE_ONLINE) | ||
404 | put_device (&cdev->dev); | ||
405 | } | 403 | } |
406 | 404 | ||
407 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) | 405 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) |
@@ -552,7 +550,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
552 | 550 | ||
553 | sch = to_subchannel(cdev->dev.parent); | 551 | sch = to_subchannel(cdev->dev.parent); |
554 | /* Update schib - pom may have changed. */ | 552 | /* Update schib - pom may have changed. */ |
555 | stsch(sch->schid, &sch->schib); | 553 | if (cio_update_schib(sch)) { |
554 | cdev->private->flags.donotify = 0; | ||
555 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | ||
556 | return; | ||
557 | } | ||
556 | /* Update lpm with verified path mask. */ | 558 | /* Update lpm with verified path mask. */ |
557 | sch->lpm = sch->vpm; | 559 | sch->lpm = sch->vpm; |
558 | /* Repeat path verification? */ | 560 | /* Repeat path verification? */ |
@@ -611,8 +613,6 @@ ccw_device_online(struct ccw_device *cdev) | |||
611 | (cdev->private->state != DEV_STATE_BOXED)) | 613 | (cdev->private->state != DEV_STATE_BOXED)) |
612 | return -EINVAL; | 614 | return -EINVAL; |
613 | sch = to_subchannel(cdev->dev.parent); | 615 | sch = to_subchannel(cdev->dev.parent); |
614 | if (css_init_done && !get_device(&cdev->dev)) | ||
615 | return -ENODEV; | ||
616 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); | 616 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
617 | if (ret != 0) { | 617 | if (ret != 0) { |
618 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 618 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
@@ -672,7 +672,7 @@ ccw_device_offline(struct ccw_device *cdev) | |||
672 | return 0; | 672 | return 0; |
673 | } | 673 | } |
674 | sch = to_subchannel(cdev->dev.parent); | 674 | sch = to_subchannel(cdev->dev.parent); |
675 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) | 675 | if (cio_update_schib(sch)) |
676 | return -ENODEV; | 676 | return -ENODEV; |
677 | if (scsw_actl(&sch->schib.scsw) != 0) | 677 | if (scsw_actl(&sch->schib.scsw) != 0) |
678 | return -EBUSY; | 678 | return -EBUSY; |
@@ -750,7 +750,10 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
750 | * Since we might not just be coming from an interrupt from the | 750 | * Since we might not just be coming from an interrupt from the |
751 | * subchannel we have to update the schib. | 751 | * subchannel we have to update the schib. |
752 | */ | 752 | */ |
753 | stsch(sch->schid, &sch->schib); | 753 | if (cio_update_schib(sch)) { |
754 | ccw_device_verify_done(cdev, -ENODEV); | ||
755 | return; | ||
756 | } | ||
754 | 757 | ||
755 | if (scsw_actl(&sch->schib.scsw) != 0 || | 758 | if (scsw_actl(&sch->schib.scsw) != 0 || |
756 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || | 759 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || |
@@ -1016,20 +1019,21 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev) | |||
1016 | 1019 | ||
1017 | sch = to_subchannel(cdev->dev.parent); | 1020 | sch = to_subchannel(cdev->dev.parent); |
1018 | /* Update some values. */ | 1021 | /* Update some values. */ |
1019 | if (stsch(sch->schid, &sch->schib)) | 1022 | if (cio_update_schib(sch)) |
1020 | return; | ||
1021 | if (!sch->schib.pmcw.dnv) | ||
1022 | return; | 1023 | return; |
1023 | /* | 1024 | /* |
1024 | * The pim, pam, pom values may not be accurate, but they are the best | 1025 | * The pim, pam, pom values may not be accurate, but they are the best |
1025 | * we have before performing device selection :/ | 1026 | * we have before performing device selection :/ |
1026 | */ | 1027 | */ |
1027 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 1028 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
1028 | /* Re-set some bits in the pmcw that were lost. */ | 1029 | /* |
1029 | sch->schib.pmcw.csense = 1; | 1030 | * Use the initial configuration since we can't be shure that the old |
1030 | sch->schib.pmcw.ena = 0; | 1031 | * paths are valid. |
1031 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1032 | */ |
1032 | sch->schib.pmcw.mp = 1; | 1033 | io_subchannel_init_config(sch); |
1034 | if (cio_commit_config(sch)) | ||
1035 | return; | ||
1036 | |||
1033 | /* We should also udate ssd info, but this has to wait. */ | 1037 | /* We should also udate ssd info, but this has to wait. */ |
1034 | /* Check if this is another device which appeared on the same sch. */ | 1038 | /* Check if this is another device which appeared on the same sch. */ |
1035 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1039 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 86bc94eb607f..fc5ca1dd52b3 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -504,7 +504,7 @@ ccw_device_verify_start(struct ccw_device *cdev) | |||
504 | sch->vpm = 0; | 504 | sch->vpm = 0; |
505 | 505 | ||
506 | /* Get current pam. */ | 506 | /* Get current pam. */ |
507 | if (stsch(sch->schid, &sch->schib)) { | 507 | if (cio_update_schib(sch)) { |
508 | ccw_device_verify_done(cdev, -ENODEV); | 508 | ccw_device_verify_done(cdev, -ENODEV); |
509 | return; | 509 | return; |
510 | } | 510 | } |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 1b03c5423be2..5814dbee2410 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -56,7 +56,8 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
56 | struct subchannel *sch; | 56 | struct subchannel *sch; |
57 | 57 | ||
58 | sch = to_subchannel(cdev->dev.parent); | 58 | sch = to_subchannel(cdev->dev.parent); |
59 | stsch (sch->schid, &sch->schib); | 59 | if (cio_update_schib(sch)) |
60 | goto doverify; | ||
60 | 61 | ||
61 | CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " | 62 | CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " |
62 | "not operational \n", __func__, | 63 | "not operational \n", __func__, |
@@ -64,6 +65,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
64 | sch->schib.pmcw.pnom); | 65 | sch->schib.pmcw.pnom); |
65 | 66 | ||
66 | sch->lpm &= ~sch->schib.pmcw.pnom; | 67 | sch->lpm &= ~sch->schib.pmcw.pnom; |
68 | doverify: | ||
67 | cdev->private->flags.doverify = 1; | 69 | cdev->private->flags.doverify = 1; |
68 | } | 70 | } |
69 | 71 | ||
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index e3ea1d5f2810..42f2b09631b6 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -10,10 +10,10 @@ | |||
10 | 10 | ||
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/schid.h> | 12 | #include <asm/schid.h> |
13 | #include <asm/debug.h> | ||
13 | #include "chsc.h" | 14 | #include "chsc.h" |
14 | 15 | ||
15 | #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ | 16 | #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ |
16 | #define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ | ||
17 | #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ | 17 | #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ |
18 | 18 | ||
19 | /* | 19 | /* |
@@ -111,12 +111,12 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, | 113 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, |
114 | int *start, int *count) | 114 | int *start, int *count, int ack) |
115 | { | 115 | { |
116 | register unsigned long _ccq asm ("0") = *count; | 116 | register unsigned long _ccq asm ("0") = *count; |
117 | register unsigned long _token asm ("1") = token; | 117 | register unsigned long _token asm ("1") = token; |
118 | unsigned long _queuestart = ((unsigned long)queue << 32) | *start; | 118 | unsigned long _queuestart = ((unsigned long)queue << 32) | *start; |
119 | unsigned long _state = 0; | 119 | unsigned long _state = (unsigned long)ack << 63; |
120 | 120 | ||
121 | asm volatile( | 121 | asm volatile( |
122 | " .insn rrf,0xB99c0000,%1,%2,0,0" | 122 | " .insn rrf,0xB99c0000,%1,%2,0,0" |
@@ -133,7 +133,7 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue, | |||
133 | static inline int do_sqbs(u64 token, unsigned char state, int queue, | 133 | static inline int do_sqbs(u64 token, unsigned char state, int queue, |
134 | int *start, int *count) { return 0; } | 134 | int *start, int *count) { return 0; } |
135 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, | 135 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, |
136 | int *start, int *count) { return 0; } | 136 | int *start, int *count, int ack) { return 0; } |
137 | #endif /* CONFIG_64BIT */ | 137 | #endif /* CONFIG_64BIT */ |
138 | 138 | ||
139 | struct qdio_irq; | 139 | struct qdio_irq; |
@@ -186,20 +186,14 @@ struct qdio_input_q { | |||
186 | /* input buffer acknowledgement flag */ | 186 | /* input buffer acknowledgement flag */ |
187 | int polling; | 187 | int polling; |
188 | 188 | ||
189 | /* how much sbals are acknowledged with qebsm */ | ||
190 | int ack_count; | ||
191 | |||
189 | /* last time of noticing incoming data */ | 192 | /* last time of noticing incoming data */ |
190 | u64 timestamp; | 193 | u64 timestamp; |
191 | |||
192 | /* lock for clearing the acknowledgement */ | ||
193 | spinlock_t lock; | ||
194 | }; | 194 | }; |
195 | 195 | ||
196 | struct qdio_output_q { | 196 | struct qdio_output_q { |
197 | /* failed siga-w attempts*/ | ||
198 | atomic_t busy_siga_counter; | ||
199 | |||
200 | /* start time of busy condition */ | ||
201 | u64 timestamp; | ||
202 | |||
203 | /* PCIs are enabled for the queue */ | 197 | /* PCIs are enabled for the queue */ |
204 | int pci_out_enabled; | 198 | int pci_out_enabled; |
205 | 199 | ||
@@ -250,6 +244,7 @@ struct qdio_q { | |||
250 | 244 | ||
251 | struct qdio_irq *irq_ptr; | 245 | struct qdio_irq *irq_ptr; |
252 | struct tasklet_struct tasklet; | 246 | struct tasklet_struct tasklet; |
247 | spinlock_t lock; | ||
253 | 248 | ||
254 | /* error condition during a data transfer */ | 249 | /* error condition during a data transfer */ |
255 | unsigned int qdio_error; | 250 | unsigned int qdio_error; |
@@ -300,11 +295,13 @@ struct qdio_irq { | |||
300 | struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; | 295 | struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; |
301 | struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; | 296 | struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; |
302 | 297 | ||
298 | debug_info_t *debug_area; | ||
303 | struct mutex setup_mutex; | 299 | struct mutex setup_mutex; |
304 | }; | 300 | }; |
305 | 301 | ||
306 | /* helper functions */ | 302 | /* helper functions */ |
307 | #define queue_type(q) q->irq_ptr->qib.qfmt | 303 | #define queue_type(q) q->irq_ptr->qib.qfmt |
304 | #define SCH_NO(q) (q->irq_ptr->schid.sch_no) | ||
308 | 305 | ||
309 | #define is_thinint_irq(irq) \ | 306 | #define is_thinint_irq(irq) \ |
310 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ | 307 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ |
@@ -348,10 +345,13 @@ static inline unsigned long long get_usecs(void) | |||
348 | ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) | 345 | ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) |
349 | #define add_buf(bufnr, inc) \ | 346 | #define add_buf(bufnr, inc) \ |
350 | ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) | 347 | ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) |
348 | #define sub_buf(bufnr, dec) \ | ||
349 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) | ||
351 | 350 | ||
352 | /* prototypes for thin interrupt */ | 351 | /* prototypes for thin interrupt */ |
353 | void qdio_sync_after_thinint(struct qdio_q *q); | 352 | void qdio_sync_after_thinint(struct qdio_q *q); |
354 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); | 353 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, |
354 | int auto_ack); | ||
355 | void qdio_check_outbound_after_thinint(struct qdio_q *q); | 355 | void qdio_check_outbound_after_thinint(struct qdio_q *q); |
356 | int qdio_inbound_q_moved(struct qdio_q *q); | 356 | int qdio_inbound_q_moved(struct qdio_q *q); |
357 | void qdio_kick_inbound_handler(struct qdio_q *q); | 357 | void qdio_kick_inbound_handler(struct qdio_q *q); |
@@ -378,10 +378,15 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
378 | int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, | 378 | int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, |
379 | int nr_output_qs); | 379 | int nr_output_qs); |
380 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); | 380 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); |
381 | int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, | ||
382 | struct subchannel_id *schid, | ||
383 | struct qdio_ssqd_desc *data); | ||
381 | int qdio_setup_irq(struct qdio_initialize *init_data); | 384 | int qdio_setup_irq(struct qdio_initialize *init_data); |
382 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | 385 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, |
383 | struct ccw_device *cdev); | 386 | struct ccw_device *cdev); |
384 | void qdio_release_memory(struct qdio_irq *irq_ptr); | 387 | void qdio_release_memory(struct qdio_irq *irq_ptr); |
388 | int qdio_setup_create_sysfs(struct ccw_device *cdev); | ||
389 | void qdio_setup_destroy_sysfs(struct ccw_device *cdev); | ||
385 | int qdio_setup_init(void); | 390 | int qdio_setup_init(void); |
386 | void qdio_setup_exit(void); | 391 | void qdio_setup_exit(void); |
387 | 392 | ||
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f05590355be8..f8a3b6967f69 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include "qdio.h" | 14 | #include "qdio.h" |
15 | 15 | ||
16 | debug_info_t *qdio_dbf_setup; | 16 | debug_info_t *qdio_dbf_setup; |
17 | debug_info_t *qdio_dbf_trace; | 17 | debug_info_t *qdio_dbf_error; |
18 | 18 | ||
19 | static struct dentry *debugfs_root; | 19 | static struct dentry *debugfs_root; |
20 | #define MAX_DEBUGFS_QUEUES 32 | 20 | #define MAX_DEBUGFS_QUEUES 32 |
@@ -22,59 +22,33 @@ static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; | |||
22 | static DEFINE_MUTEX(debugfs_mutex); | 22 | static DEFINE_MUTEX(debugfs_mutex); |
23 | #define QDIO_DEBUGFS_NAME_LEN 40 | 23 | #define QDIO_DEBUGFS_NAME_LEN 40 |
24 | 24 | ||
25 | void qdio_allocate_do_dbf(struct qdio_initialize *init_data) | 25 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
26 | struct qdio_irq *irq_ptr) | ||
26 | { | 27 | { |
27 | char dbf_text[20]; | 28 | char text[20]; |
28 | 29 | ||
29 | sprintf(dbf_text, "qfmt:%x", init_data->q_format); | 30 | DBF_EVENT("qfmt:%1d", init_data->q_format); |
30 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 31 | DBF_HEX(init_data->adapter_name, 8); |
31 | QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); | 32 | DBF_EVENT("qpff%4x", init_data->qib_param_field_format); |
32 | sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); | 33 | DBF_HEX(&init_data->qib_param_field, sizeof(void *)); |
33 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 34 | DBF_HEX(&init_data->input_slib_elements, sizeof(void *)); |
34 | QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); | 35 | DBF_HEX(&init_data->output_slib_elements, sizeof(void *)); |
35 | QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); | 36 | DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs, |
36 | QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); | 37 | init_data->no_output_qs); |
37 | sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); | 38 | DBF_HEX(&init_data->input_handler, sizeof(void *)); |
38 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 39 | DBF_HEX(&init_data->output_handler, sizeof(void *)); |
39 | sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); | 40 | DBF_HEX(&init_data->int_parm, sizeof(long)); |
40 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 41 | DBF_HEX(&init_data->flags, sizeof(long)); |
41 | QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *)); | 42 | DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); |
42 | QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); | 43 | DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); |
43 | QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); | 44 | DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); |
44 | QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); | 45 | |
45 | QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); | 46 | /* allocate trace view for the interface */ |
46 | QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); | 47 | snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev)); |
47 | } | 48 | irq_ptr->debug_area = debug_register(text, 2, 1, 16); |
48 | 49 | debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view); | |
49 | static void qdio_unregister_dbf_views(void) | 50 | debug_set_level(irq_ptr->debug_area, DBF_WARN); |
50 | { | 51 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); |
51 | if (qdio_dbf_setup) | ||
52 | debug_unregister(qdio_dbf_setup); | ||
53 | if (qdio_dbf_trace) | ||
54 | debug_unregister(qdio_dbf_trace); | ||
55 | } | ||
56 | |||
57 | static int qdio_register_dbf_views(void) | ||
58 | { | ||
59 | qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, | ||
60 | QDIO_DBF_SETUP_NR_AREAS, | ||
61 | QDIO_DBF_SETUP_LEN); | ||
62 | if (!qdio_dbf_setup) | ||
63 | goto oom; | ||
64 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | ||
65 | debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); | ||
66 | |||
67 | qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, | ||
68 | QDIO_DBF_TRACE_NR_AREAS, | ||
69 | QDIO_DBF_TRACE_LEN); | ||
70 | if (!qdio_dbf_trace) | ||
71 | goto oom; | ||
72 | debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view); | ||
73 | debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL); | ||
74 | return 0; | ||
75 | oom: | ||
76 | qdio_unregister_dbf_views(); | ||
77 | return -ENOMEM; | ||
78 | } | 52 | } |
79 | 53 | ||
80 | static int qstat_show(struct seq_file *m, void *v) | 54 | static int qstat_show(struct seq_file *m, void *v) |
@@ -86,16 +60,18 @@ static int qstat_show(struct seq_file *m, void *v) | |||
86 | if (!q) | 60 | if (!q) |
87 | return 0; | 61 | return 0; |
88 | 62 | ||
89 | seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci); | 63 | seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); |
90 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); | 64 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); |
91 | seq_printf(m, "ftc: %d\n", q->first_to_check); | 65 | seq_printf(m, "ftc: %d\n", q->first_to_check); |
92 | seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); | 66 | seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); |
93 | seq_printf(m, "polling: %d\n", q->u.in.polling); | 67 | seq_printf(m, "polling: %d\n", q->u.in.polling); |
68 | seq_printf(m, "ack count: %d\n", q->u.in.ack_count); | ||
94 | seq_printf(m, "slsb buffer states:\n"); | 69 | seq_printf(m, "slsb buffer states:\n"); |
70 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | ||
95 | 71 | ||
96 | qdio_siga_sync_q(q); | 72 | qdio_siga_sync_q(q); |
97 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { | 73 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { |
98 | get_buf_state(q, i, &state); | 74 | get_buf_state(q, i, &state, 0); |
99 | switch (state) { | 75 | switch (state) { |
100 | case SLSB_P_INPUT_NOT_INIT: | 76 | case SLSB_P_INPUT_NOT_INIT: |
101 | case SLSB_P_OUTPUT_NOT_INIT: | 77 | case SLSB_P_OUTPUT_NOT_INIT: |
@@ -127,6 +103,7 @@ static int qstat_show(struct seq_file *m, void *v) | |||
127 | seq_printf(m, "\n"); | 103 | seq_printf(m, "\n"); |
128 | } | 104 | } |
129 | seq_printf(m, "\n"); | 105 | seq_printf(m, "\n"); |
106 | seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); | ||
130 | return 0; | 107 | return 0; |
131 | } | 108 | } |
132 | 109 | ||
@@ -223,11 +200,24 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd | |||
223 | int __init qdio_debug_init(void) | 200 | int __init qdio_debug_init(void) |
224 | { | 201 | { |
225 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); | 202 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); |
226 | return qdio_register_dbf_views(); | 203 | |
204 | qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); | ||
205 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | ||
206 | debug_set_level(qdio_dbf_setup, DBF_INFO); | ||
207 | DBF_EVENT("dbf created\n"); | ||
208 | |||
209 | qdio_dbf_error = debug_register("qdio_error", 4, 1, 16); | ||
210 | debug_register_view(qdio_dbf_error, &debug_hex_ascii_view); | ||
211 | debug_set_level(qdio_dbf_error, DBF_INFO); | ||
212 | DBF_ERROR("dbf created\n"); | ||
213 | return 0; | ||
227 | } | 214 | } |
228 | 215 | ||
229 | void qdio_debug_exit(void) | 216 | void qdio_debug_exit(void) |
230 | { | 217 | { |
231 | debugfs_remove(debugfs_root); | 218 | debugfs_remove(debugfs_root); |
232 | qdio_unregister_dbf_views(); | 219 | if (qdio_dbf_setup) |
220 | debug_unregister(qdio_dbf_setup); | ||
221 | if (qdio_dbf_error) | ||
222 | debug_unregister(qdio_dbf_error); | ||
233 | } | 223 | } |
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index 5a4d85b829ad..5d70bd162ae9 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h | |||
@@ -12,80 +12,72 @@ | |||
12 | #include <asm/qdio.h> | 12 | #include <asm/qdio.h> |
13 | #include "qdio.h" | 13 | #include "qdio.h" |
14 | 14 | ||
15 | #define QDIO_DBF_HEX(ex, name, level, addr, len) \ | 15 | /* that gives us 15 characters in the text event views */ |
16 | #define QDIO_DBF_LEN 16 | ||
17 | |||
18 | extern debug_info_t *qdio_dbf_setup; | ||
19 | extern debug_info_t *qdio_dbf_error; | ||
20 | |||
21 | /* sort out low debug levels early to avoid wasted sprints */ | ||
22 | static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) | ||
23 | { | ||
24 | return (level <= dbf_grp->level); | ||
25 | } | ||
26 | |||
27 | #define DBF_ERR 3 /* error conditions */ | ||
28 | #define DBF_WARN 4 /* warning conditions */ | ||
29 | #define DBF_INFO 6 /* informational */ | ||
30 | |||
31 | #undef DBF_EVENT | ||
32 | #undef DBF_ERROR | ||
33 | #undef DBF_DEV_EVENT | ||
34 | |||
35 | #define DBF_EVENT(text...) \ | ||
16 | do { \ | 36 | do { \ |
17 | if (ex) \ | 37 | char debug_buffer[QDIO_DBF_LEN]; \ |
18 | debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ | 38 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ |
19 | else \ | 39 | debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ |
20 | debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ | ||
21 | } while (0) | 40 | } while (0) |
22 | #define QDIO_DBF_TEXT(ex, name, level, text) \ | 41 | |
42 | #define DBF_HEX(addr, len) \ | ||
23 | do { \ | 43 | do { \ |
24 | if (ex) \ | 44 | debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \ |
25 | debug_text_exception(qdio_dbf_##name, level, text); \ | ||
26 | else \ | ||
27 | debug_text_event(qdio_dbf_##name, level, text); \ | ||
28 | } while (0) | 45 | } while (0) |
29 | 46 | ||
30 | #define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) | 47 | #define DBF_ERROR(text...) \ |
31 | #define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) | 48 | do { \ |
32 | #define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) | 49 | char debug_buffer[QDIO_DBF_LEN]; \ |
33 | 50 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ | |
34 | #ifdef CONFIG_QDIO_DEBUG | 51 | debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ |
35 | #define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) | 52 | } while (0) |
36 | #define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) | ||
37 | #define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) | ||
38 | #define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) | ||
39 | #else | ||
40 | #define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) | ||
41 | #define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) | ||
42 | #define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) | ||
43 | #define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) | ||
44 | #endif /* CONFIG_QDIO_DEBUG */ | ||
45 | |||
46 | #define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) | ||
47 | #define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) | ||
48 | #define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) | ||
49 | |||
50 | #ifdef CONFIG_QDIO_DEBUG | ||
51 | #define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) | ||
52 | #define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) | ||
53 | #define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) | ||
54 | #define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text) | ||
55 | #else | ||
56 | #define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) | ||
57 | #define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) | ||
58 | #define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) | ||
59 | #define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) | ||
60 | #endif /* CONFIG_QDIO_DEBUG */ | ||
61 | 53 | ||
62 | /* s390dbf views */ | 54 | #define DBF_ERROR_HEX(addr, len) \ |
63 | #define QDIO_DBF_SETUP_LEN 8 | 55 | do { \ |
64 | #define QDIO_DBF_SETUP_PAGES 8 | 56 | debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \ |
65 | #define QDIO_DBF_SETUP_NR_AREAS 1 | 57 | } while (0) |
66 | 58 | ||
67 | #define QDIO_DBF_TRACE_LEN 8 | ||
68 | #define QDIO_DBF_TRACE_NR_AREAS 2 | ||
69 | 59 | ||
70 | #ifdef CONFIG_QDIO_DEBUG | 60 | #define DBF_DEV_EVENT(level, device, text...) \ |
71 | #define QDIO_DBF_TRACE_PAGES 32 | 61 | do { \ |
72 | #define QDIO_DBF_SETUP_LEVEL 6 | 62 | char debug_buffer[QDIO_DBF_LEN]; \ |
73 | #define QDIO_DBF_TRACE_LEVEL 4 | 63 | if (qdio_dbf_passes(device->debug_area, level)) { \ |
74 | #else /* !CONFIG_QDIO_DEBUG */ | 64 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ |
75 | #define QDIO_DBF_TRACE_PAGES 8 | 65 | debug_text_event(device->debug_area, level, debug_buffer); \ |
76 | #define QDIO_DBF_SETUP_LEVEL 2 | 66 | } \ |
77 | #define QDIO_DBF_TRACE_LEVEL 2 | 67 | } while (0) |
78 | #endif /* CONFIG_QDIO_DEBUG */ | ||
79 | 68 | ||
80 | extern debug_info_t *qdio_dbf_setup; | 69 | #define DBF_DEV_HEX(level, device, addr, len) \ |
81 | extern debug_info_t *qdio_dbf_trace; | 70 | do { \ |
71 | debug_event(device->debug_area, level, (void*)(addr), len); \ | ||
72 | } while (0) | ||
82 | 73 | ||
83 | void qdio_allocate_do_dbf(struct qdio_initialize *init_data); | 74 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
84 | void debug_print_bstat(struct qdio_q *q); | 75 | struct qdio_irq *irq_ptr); |
85 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, | 76 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, |
86 | struct ccw_device *cdev); | 77 | struct ccw_device *cdev); |
87 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, | 78 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, |
88 | struct ccw_device *cdev); | 79 | struct ccw_device *cdev); |
89 | int qdio_debug_init(void); | 80 | int qdio_debug_init(void); |
90 | void qdio_debug_exit(void); | 81 | void qdio_debug_exit(void); |
82 | |||
91 | #endif | 83 | #endif |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 7c8659151993..744f928a59ea 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -74,7 +74,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) | |||
74 | * Note: For IQDC unicast queues only the highest priority queue is processed. | 74 | * Note: For IQDC unicast queues only the highest priority queue is processed. |
75 | */ | 75 | */ |
76 | static inline int do_siga_output(unsigned long schid, unsigned long mask, | 76 | static inline int do_siga_output(unsigned long schid, unsigned long mask, |
77 | u32 *bb, unsigned int fc) | 77 | unsigned int *bb, unsigned int fc) |
78 | { | 78 | { |
79 | register unsigned long __fc asm("0") = fc; | 79 | register unsigned long __fc asm("0") = fc; |
80 | register unsigned long __schid asm("1") = schid; | 80 | register unsigned long __schid asm("1") = schid; |
@@ -95,8 +95,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, | |||
95 | 95 | ||
96 | static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | 96 | static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) |
97 | { | 97 | { |
98 | char dbf_text[15]; | ||
99 | |||
100 | /* all done or next buffer state different */ | 98 | /* all done or next buffer state different */ |
101 | if (ccq == 0 || ccq == 32) | 99 | if (ccq == 0 || ccq == 32) |
102 | return 0; | 100 | return 0; |
@@ -104,8 +102,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
104 | if (ccq == 96 || ccq == 97) | 102 | if (ccq == 96 || ccq == 97) |
105 | return 1; | 103 | return 1; |
106 | /* notify devices immediately */ | 104 | /* notify devices immediately */ |
107 | sprintf(dbf_text, "%d", ccq); | 105 | DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); |
108 | QDIO_DBF_TEXT2(1, trace, dbf_text); | ||
109 | return -EIO; | 106 | return -EIO; |
110 | } | 107 | } |
111 | 108 | ||
@@ -115,41 +112,45 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
115 | * @state: state of the extracted buffers | 112 | * @state: state of the extracted buffers |
116 | * @start: buffer number to start at | 113 | * @start: buffer number to start at |
117 | * @count: count of buffers to examine | 114 | * @count: count of buffers to examine |
115 | * @auto_ack: automatically acknowledge buffers | ||
118 | * | 116 | * |
119 | * Returns the number of successfull extracted equal buffer states. | 117 | * Returns the number of successfull extracted equal buffer states. |
120 | * Stops processing if a state is different from the last buffers state. | 118 | * Stops processing if a state is different from the last buffers state. |
121 | */ | 119 | */ |
122 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | 120 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, |
123 | int start, int count) | 121 | int start, int count, int auto_ack) |
124 | { | 122 | { |
125 | unsigned int ccq = 0; | 123 | unsigned int ccq = 0; |
126 | int tmp_count = count, tmp_start = start; | 124 | int tmp_count = count, tmp_start = start; |
127 | int nr = q->nr; | 125 | int nr = q->nr; |
128 | int rc; | 126 | int rc; |
129 | char dbf_text[15]; | ||
130 | 127 | ||
131 | BUG_ON(!q->irq_ptr->sch_token); | 128 | BUG_ON(!q->irq_ptr->sch_token); |
129 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); | ||
132 | 130 | ||
133 | if (!q->is_input_q) | 131 | if (!q->is_input_q) |
134 | nr += q->irq_ptr->nr_input_qs; | 132 | nr += q->irq_ptr->nr_input_qs; |
135 | again: | 133 | again: |
136 | ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); | 134 | ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, |
135 | auto_ack); | ||
137 | rc = qdio_check_ccq(q, ccq); | 136 | rc = qdio_check_ccq(q, ccq); |
138 | 137 | ||
139 | /* At least one buffer was processed, return and extract the remaining | 138 | /* At least one buffer was processed, return and extract the remaining |
140 | * buffers later. | 139 | * buffers later. |
141 | */ | 140 | */ |
142 | if ((ccq == 96) && (count != tmp_count)) | 141 | if ((ccq == 96) && (count != tmp_count)) { |
142 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); | ||
143 | return (count - tmp_count); | 143 | return (count - tmp_count); |
144 | } | ||
145 | |||
144 | if (rc == 1) { | 146 | if (rc == 1) { |
145 | QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); | 147 | DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); |
146 | goto again; | 148 | goto again; |
147 | } | 149 | } |
148 | 150 | ||
149 | if (rc < 0) { | 151 | if (rc < 0) { |
150 | QDIO_DBF_TEXT2(1, trace, "eqberr"); | 152 | DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); |
151 | sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); | 153 | DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); |
152 | QDIO_DBF_TEXT2(1, trace, dbf_text); | ||
153 | q->handler(q->irq_ptr->cdev, | 154 | q->handler(q->irq_ptr->cdev, |
154 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, | 155 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, |
155 | 0, -1, -1, q->irq_ptr->int_parm); | 156 | 0, -1, -1, q->irq_ptr->int_parm); |
@@ -176,9 +177,12 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, | |||
176 | int tmp_count = count, tmp_start = start; | 177 | int tmp_count = count, tmp_start = start; |
177 | int nr = q->nr; | 178 | int nr = q->nr; |
178 | int rc; | 179 | int rc; |
179 | char dbf_text[15]; | 180 | |
181 | if (!count) | ||
182 | return 0; | ||
180 | 183 | ||
181 | BUG_ON(!q->irq_ptr->sch_token); | 184 | BUG_ON(!q->irq_ptr->sch_token); |
185 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); | ||
182 | 186 | ||
183 | if (!q->is_input_q) | 187 | if (!q->is_input_q) |
184 | nr += q->irq_ptr->nr_input_qs; | 188 | nr += q->irq_ptr->nr_input_qs; |
@@ -186,16 +190,13 @@ again: | |||
186 | ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); | 190 | ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); |
187 | rc = qdio_check_ccq(q, ccq); | 191 | rc = qdio_check_ccq(q, ccq); |
188 | if (rc == 1) { | 192 | if (rc == 1) { |
189 | QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); | 193 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); |
194 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); | ||
190 | goto again; | 195 | goto again; |
191 | } | 196 | } |
192 | if (rc < 0) { | 197 | if (rc < 0) { |
193 | QDIO_DBF_TEXT3(1, trace, "sqberr"); | 198 | DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); |
194 | sprintf(dbf_text, "%2x,%2x", count, tmp_count); | 199 | DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); |
195 | QDIO_DBF_TEXT3(1, trace, dbf_text); | ||
196 | sprintf(dbf_text, "%d,%d", ccq, nr); | ||
197 | QDIO_DBF_TEXT3(1, trace, dbf_text); | ||
198 | |||
199 | q->handler(q->irq_ptr->cdev, | 200 | q->handler(q->irq_ptr->cdev, |
200 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, | 201 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, |
201 | 0, -1, -1, q->irq_ptr->int_parm); | 202 | 0, -1, -1, q->irq_ptr->int_parm); |
@@ -207,7 +208,8 @@ again: | |||
207 | 208 | ||
208 | /* returns number of examined buffers and their common state in *state */ | 209 | /* returns number of examined buffers and their common state in *state */ |
209 | static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | 210 | static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, |
210 | unsigned char *state, unsigned int count) | 211 | unsigned char *state, unsigned int count, |
212 | int auto_ack) | ||
211 | { | 213 | { |
212 | unsigned char __state = 0; | 214 | unsigned char __state = 0; |
213 | int i; | 215 | int i; |
@@ -216,7 +218,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
216 | BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); | 218 | BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); |
217 | 219 | ||
218 | if (is_qebsm(q)) | 220 | if (is_qebsm(q)) |
219 | return qdio_do_eqbs(q, state, bufnr, count); | 221 | return qdio_do_eqbs(q, state, bufnr, count, auto_ack); |
220 | 222 | ||
221 | for (i = 0; i < count; i++) { | 223 | for (i = 0; i < count; i++) { |
222 | if (!__state) | 224 | if (!__state) |
@@ -230,9 +232,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
230 | } | 232 | } |
231 | 233 | ||
232 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, | 234 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, |
233 | unsigned char *state) | 235 | unsigned char *state, int auto_ack) |
234 | { | 236 | { |
235 | return get_buf_states(q, bufnr, state, 1); | 237 | return get_buf_states(q, bufnr, state, 1, auto_ack); |
236 | } | 238 | } |
237 | 239 | ||
238 | /* wrap-around safe setting of slsb states, returns number of changed buffers */ | 240 | /* wrap-around safe setting of slsb states, returns number of changed buffers */ |
@@ -282,14 +284,12 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, | |||
282 | if (!need_siga_sync(q)) | 284 | if (!need_siga_sync(q)) |
283 | return 0; | 285 | return 0; |
284 | 286 | ||
287 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); | ||
285 | qdio_perf_stat_inc(&perf_stats.siga_sync); | 288 | qdio_perf_stat_inc(&perf_stats.siga_sync); |
286 | 289 | ||
287 | cc = do_siga_sync(q->irq_ptr->schid, output, input); | 290 | cc = do_siga_sync(q->irq_ptr->schid, output, input); |
288 | if (cc) { | 291 | if (cc) |
289 | QDIO_DBF_TEXT4(0, trace, "sigasync"); | 292 | DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); |
290 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
291 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | ||
292 | } | ||
293 | return cc; | 293 | return cc; |
294 | } | 294 | } |
295 | 295 | ||
@@ -311,50 +311,37 @@ static inline int qdio_siga_sync_all(struct qdio_q *q) | |||
311 | return qdio_siga_sync(q, ~0U, ~0U); | 311 | return qdio_siga_sync(q, ~0U, ~0U); |
312 | } | 312 | } |
313 | 313 | ||
314 | static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) | 314 | static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) |
315 | { | 315 | { |
316 | unsigned int fc = 0; | ||
317 | unsigned long schid; | 316 | unsigned long schid; |
317 | unsigned int fc = 0; | ||
318 | u64 start_time = 0; | ||
319 | int cc; | ||
318 | 320 | ||
319 | if (q->u.out.use_enh_siga) { | 321 | if (q->u.out.use_enh_siga) |
320 | fc = 3; | 322 | fc = 3; |
321 | } | 323 | |
322 | if (!is_qebsm(q)) | 324 | if (is_qebsm(q)) { |
323 | schid = *((u32 *)&q->irq_ptr->schid); | ||
324 | else { | ||
325 | schid = q->irq_ptr->sch_token; | 325 | schid = q->irq_ptr->sch_token; |
326 | fc |= 0x80; | 326 | fc |= 0x80; |
327 | } | 327 | } |
328 | return do_siga_output(schid, q->mask, busy_bit, fc); | 328 | else |
329 | } | 329 | schid = *((u32 *)&q->irq_ptr->schid); |
330 | |||
331 | static int qdio_siga_output(struct qdio_q *q) | ||
332 | { | ||
333 | int cc; | ||
334 | u32 busy_bit; | ||
335 | u64 start_time = 0; | ||
336 | char dbf_text[15]; | ||
337 | |||
338 | QDIO_DBF_TEXT5(0, trace, "sigaout"); | ||
339 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | ||
340 | 330 | ||
341 | qdio_perf_stat_inc(&perf_stats.siga_out); | ||
342 | again: | 331 | again: |
343 | cc = qdio_do_siga_output(q, &busy_bit); | 332 | cc = do_siga_output(schid, q->mask, busy_bit, fc); |
344 | if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { | ||
345 | sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); | ||
346 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
347 | 333 | ||
348 | if (!start_time) | 334 | /* hipersocket busy condition */ |
335 | if (*busy_bit) { | ||
336 | WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); | ||
337 | |||
338 | if (!start_time) { | ||
349 | start_time = get_usecs(); | 339 | start_time = get_usecs(); |
350 | else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | 340 | goto again; |
341 | } | ||
342 | if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | ||
351 | goto again; | 343 | goto again; |
352 | } | 344 | } |
353 | |||
354 | if (cc == 2 && busy_bit) | ||
355 | cc |= QDIO_ERROR_SIGA_BUSY; | ||
356 | if (cc) | ||
357 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | ||
358 | return cc; | 345 | return cc; |
359 | } | 346 | } |
360 | 347 | ||
@@ -362,14 +349,12 @@ static inline int qdio_siga_input(struct qdio_q *q) | |||
362 | { | 349 | { |
363 | int cc; | 350 | int cc; |
364 | 351 | ||
365 | QDIO_DBF_TEXT4(0, trace, "sigain"); | 352 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); |
366 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
367 | |||
368 | qdio_perf_stat_inc(&perf_stats.siga_in); | 353 | qdio_perf_stat_inc(&perf_stats.siga_in); |
369 | 354 | ||
370 | cc = do_siga_input(q->irq_ptr->schid, q->mask); | 355 | cc = do_siga_input(q->irq_ptr->schid, q->mask); |
371 | if (cc) | 356 | if (cc) |
372 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | 357 | DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); |
373 | return cc; | 358 | return cc; |
374 | } | 359 | } |
375 | 360 | ||
@@ -387,35 +372,91 @@ void qdio_sync_after_thinint(struct qdio_q *q) | |||
387 | 372 | ||
388 | inline void qdio_stop_polling(struct qdio_q *q) | 373 | inline void qdio_stop_polling(struct qdio_q *q) |
389 | { | 374 | { |
390 | spin_lock_bh(&q->u.in.lock); | 375 | if (!q->u.in.polling) |
391 | if (!q->u.in.polling) { | ||
392 | spin_unlock_bh(&q->u.in.lock); | ||
393 | return; | 376 | return; |
394 | } | 377 | |
395 | q->u.in.polling = 0; | 378 | q->u.in.polling = 0; |
396 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); | 379 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); |
397 | 380 | ||
398 | /* show the card that we are not polling anymore */ | 381 | /* show the card that we are not polling anymore */ |
399 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | 382 | if (is_qebsm(q)) { |
400 | spin_unlock_bh(&q->u.in.lock); | 383 | set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, |
384 | q->u.in.ack_count); | ||
385 | q->u.in.ack_count = 0; | ||
386 | } else | ||
387 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | ||
401 | } | 388 | } |
402 | 389 | ||
403 | static void announce_buffer_error(struct qdio_q *q) | 390 | static void announce_buffer_error(struct qdio_q *q, int count) |
404 | { | 391 | { |
405 | char dbf_text[15]; | 392 | q->qdio_error |= QDIO_ERROR_SLSB_STATE; |
393 | |||
394 | /* special handling for no target buffer empty */ | ||
395 | if ((!q->is_input_q && | ||
396 | (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { | ||
397 | qdio_perf_stat_inc(&perf_stats.outbound_target_full); | ||
398 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d", | ||
399 | q->first_to_check); | ||
400 | return; | ||
401 | } | ||
406 | 402 | ||
407 | if (q->is_input_q) | 403 | DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); |
408 | QDIO_DBF_TEXT3(1, trace, "inperr"); | 404 | DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); |
409 | else | 405 | DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); |
410 | QDIO_DBF_TEXT3(0, trace, "outperr"); | 406 | DBF_ERROR("F14:%2x F15:%2x", |
407 | q->sbal[q->first_to_check]->element[14].flags & 0xff, | ||
408 | q->sbal[q->first_to_check]->element[15].flags & 0xff); | ||
409 | } | ||
410 | |||
411 | static inline void inbound_primed(struct qdio_q *q, int count) | ||
412 | { | ||
413 | int new; | ||
414 | |||
415 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count); | ||
416 | |||
417 | /* for QEBSM the ACK was already set by EQBS */ | ||
418 | if (is_qebsm(q)) { | ||
419 | if (!q->u.in.polling) { | ||
420 | q->u.in.polling = 1; | ||
421 | q->u.in.ack_count = count; | ||
422 | q->last_move_ftc = q->first_to_check; | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | /* delete the previous ACK's */ | ||
427 | set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, | ||
428 | q->u.in.ack_count); | ||
429 | q->u.in.ack_count = count; | ||
430 | q->last_move_ftc = q->first_to_check; | ||
431 | return; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * ACK the newest buffer. The ACK will be removed in qdio_stop_polling | ||
436 | * or by the next inbound run. | ||
437 | */ | ||
438 | new = add_buf(q->first_to_check, count - 1); | ||
439 | if (q->u.in.polling) { | ||
440 | /* reset the previous ACK but first set the new one */ | ||
441 | set_buf_state(q, new, SLSB_P_INPUT_ACK); | ||
442 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | ||
443 | } | ||
444 | else { | ||
445 | q->u.in.polling = 1; | ||
446 | set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); | ||
447 | } | ||
411 | 448 | ||
412 | sprintf(dbf_text, "%x-%x-%x", q->first_to_check, | 449 | q->last_move_ftc = new; |
413 | q->sbal[q->first_to_check]->element[14].flags, | 450 | count--; |
414 | q->sbal[q->first_to_check]->element[15].flags); | 451 | if (!count) |
415 | QDIO_DBF_TEXT3(1, trace, dbf_text); | 452 | return; |
416 | QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); | ||
417 | 453 | ||
418 | q->qdio_error = QDIO_ERROR_SLSB_STATE; | 454 | /* |
455 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
456 | * we're loosing initiative in the thinint code. | ||
457 | */ | ||
458 | set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, | ||
459 | count); | ||
419 | } | 460 | } |
420 | 461 | ||
421 | static int get_inbound_buffer_frontier(struct qdio_q *q) | 462 | static int get_inbound_buffer_frontier(struct qdio_q *q) |
@@ -424,13 +465,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) | |||
424 | unsigned char state; | 465 | unsigned char state; |
425 | 466 | ||
426 | /* | 467 | /* |
427 | * If we still poll don't update last_move_ftc, keep the | ||
428 | * previously ACK buffer there. | ||
429 | */ | ||
430 | if (!q->u.in.polling) | ||
431 | q->last_move_ftc = q->first_to_check; | ||
432 | |||
433 | /* | ||
434 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved | 468 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved |
435 | * would return 0. | 469 | * would return 0. |
436 | */ | 470 | */ |
@@ -450,34 +484,13 @@ check_next: | |||
450 | if (q->first_to_check == stop) | 484 | if (q->first_to_check == stop) |
451 | goto out; | 485 | goto out; |
452 | 486 | ||
453 | count = get_buf_states(q, q->first_to_check, &state, count); | 487 | count = get_buf_states(q, q->first_to_check, &state, count, 1); |
454 | if (!count) | 488 | if (!count) |
455 | goto out; | 489 | goto out; |
456 | 490 | ||
457 | switch (state) { | 491 | switch (state) { |
458 | case SLSB_P_INPUT_PRIMED: | 492 | case SLSB_P_INPUT_PRIMED: |
459 | QDIO_DBF_TEXT5(0, trace, "inptprim"); | 493 | inbound_primed(q, count); |
460 | |||
461 | /* | ||
462 | * Only ACK the first buffer. The ACK will be removed in | ||
463 | * qdio_stop_polling. | ||
464 | */ | ||
465 | if (q->u.in.polling) | ||
466 | state = SLSB_P_INPUT_NOT_INIT; | ||
467 | else { | ||
468 | q->u.in.polling = 1; | ||
469 | state = SLSB_P_INPUT_ACK; | ||
470 | } | ||
471 | set_buf_state(q, q->first_to_check, state); | ||
472 | |||
473 | /* | ||
474 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
475 | * we're loosing initiative in the thinint code. | ||
476 | */ | ||
477 | if (count > 1) | ||
478 | set_buf_states(q, next_buf(q->first_to_check), | ||
479 | SLSB_P_INPUT_NOT_INIT, count - 1); | ||
480 | |||
481 | /* | 494 | /* |
482 | * No siga-sync needed for non-qebsm here, as the inbound queue | 495 | * No siga-sync needed for non-qebsm here, as the inbound queue |
483 | * will be synced on the next siga-r, resp. | 496 | * will be synced on the next siga-r, resp. |
@@ -487,7 +500,7 @@ check_next: | |||
487 | atomic_sub(count, &q->nr_buf_used); | 500 | atomic_sub(count, &q->nr_buf_used); |
488 | goto check_next; | 501 | goto check_next; |
489 | case SLSB_P_INPUT_ERROR: | 502 | case SLSB_P_INPUT_ERROR: |
490 | announce_buffer_error(q); | 503 | announce_buffer_error(q, count); |
491 | /* process the buffer, the upper layer will take care of it */ | 504 | /* process the buffer, the upper layer will take care of it */ |
492 | q->first_to_check = add_buf(q->first_to_check, count); | 505 | q->first_to_check = add_buf(q->first_to_check, count); |
493 | atomic_sub(count, &q->nr_buf_used); | 506 | atomic_sub(count, &q->nr_buf_used); |
@@ -495,13 +508,12 @@ check_next: | |||
495 | case SLSB_CU_INPUT_EMPTY: | 508 | case SLSB_CU_INPUT_EMPTY: |
496 | case SLSB_P_INPUT_NOT_INIT: | 509 | case SLSB_P_INPUT_NOT_INIT: |
497 | case SLSB_P_INPUT_ACK: | 510 | case SLSB_P_INPUT_ACK: |
498 | QDIO_DBF_TEXT5(0, trace, "inpnipro"); | 511 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); |
499 | break; | 512 | break; |
500 | default: | 513 | default: |
501 | BUG(); | 514 | BUG(); |
502 | } | 515 | } |
503 | out: | 516 | out: |
504 | QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); | ||
505 | return q->first_to_check; | 517 | return q->first_to_check; |
506 | } | 518 | } |
507 | 519 | ||
@@ -515,8 +527,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
515 | if (!need_siga_sync(q) && !pci_out_supported(q)) | 527 | if (!need_siga_sync(q) && !pci_out_supported(q)) |
516 | q->u.in.timestamp = get_usecs(); | 528 | q->u.in.timestamp = get_usecs(); |
517 | 529 | ||
518 | QDIO_DBF_TEXT4(0, trace, "inhasmvd"); | 530 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved"); |
519 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
520 | return 1; | 531 | return 1; |
521 | } else | 532 | } else |
522 | return 0; | 533 | return 0; |
@@ -524,10 +535,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
524 | 535 | ||
525 | static int qdio_inbound_q_done(struct qdio_q *q) | 536 | static int qdio_inbound_q_done(struct qdio_q *q) |
526 | { | 537 | { |
527 | unsigned char state; | 538 | unsigned char state = 0; |
528 | #ifdef CONFIG_QDIO_DEBUG | ||
529 | char dbf_text[15]; | ||
530 | #endif | ||
531 | 539 | ||
532 | if (!atomic_read(&q->nr_buf_used)) | 540 | if (!atomic_read(&q->nr_buf_used)) |
533 | return 1; | 541 | return 1; |
@@ -538,7 +546,7 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
538 | */ | 546 | */ |
539 | qdio_siga_sync_q(q); | 547 | qdio_siga_sync_q(q); |
540 | 548 | ||
541 | get_buf_state(q, q->first_to_check, &state); | 549 | get_buf_state(q, q->first_to_check, &state, 0); |
542 | if (state == SLSB_P_INPUT_PRIMED) | 550 | if (state == SLSB_P_INPUT_PRIMED) |
543 | /* we got something to do */ | 551 | /* we got something to do */ |
544 | return 0; | 552 | return 0; |
@@ -552,20 +560,12 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
552 | * has (probably) not moved (see qdio_inbound_processing). | 560 | * has (probably) not moved (see qdio_inbound_processing). |
553 | */ | 561 | */ |
554 | if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { | 562 | if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { |
555 | #ifdef CONFIG_QDIO_DEBUG | 563 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", |
556 | QDIO_DBF_TEXT4(0, trace, "inqisdon"); | 564 | q->first_to_check); |
557 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
558 | sprintf(dbf_text, "pf%02x", q->first_to_check); | ||
559 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
560 | #endif /* CONFIG_QDIO_DEBUG */ | ||
561 | return 1; | 565 | return 1; |
562 | } else { | 566 | } else { |
563 | #ifdef CONFIG_QDIO_DEBUG | 567 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d", |
564 | QDIO_DBF_TEXT4(0, trace, "inqisntd"); | 568 | q->first_to_check); |
565 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
566 | sprintf(dbf_text, "pf%02x", q->first_to_check); | ||
567 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
568 | #endif /* CONFIG_QDIO_DEBUG */ | ||
569 | return 0; | 569 | return 0; |
570 | } | 570 | } |
571 | } | 571 | } |
@@ -573,9 +573,6 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
573 | void qdio_kick_inbound_handler(struct qdio_q *q) | 573 | void qdio_kick_inbound_handler(struct qdio_q *q) |
574 | { | 574 | { |
575 | int count, start, end; | 575 | int count, start, end; |
576 | #ifdef CONFIG_QDIO_DEBUG | ||
577 | char dbf_text[15]; | ||
578 | #endif | ||
579 | 576 | ||
580 | qdio_perf_stat_inc(&perf_stats.inbound_handler); | 577 | qdio_perf_stat_inc(&perf_stats.inbound_handler); |
581 | 578 | ||
@@ -586,10 +583,7 @@ void qdio_kick_inbound_handler(struct qdio_q *q) | |||
586 | else | 583 | else |
587 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; | 584 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; |
588 | 585 | ||
589 | #ifdef CONFIG_QDIO_DEBUG | 586 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count); |
590 | sprintf(dbf_text, "s=%2xc=%2x", start, count); | ||
591 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
592 | #endif /* CONFIG_QDIO_DEBUG */ | ||
593 | 587 | ||
594 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | 588 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
595 | return; | 589 | return; |
@@ -655,14 +649,14 @@ check_next: | |||
655 | if (q->first_to_check == stop) | 649 | if (q->first_to_check == stop) |
656 | return q->first_to_check; | 650 | return q->first_to_check; |
657 | 651 | ||
658 | count = get_buf_states(q, q->first_to_check, &state, count); | 652 | count = get_buf_states(q, q->first_to_check, &state, count, 0); |
659 | if (!count) | 653 | if (!count) |
660 | return q->first_to_check; | 654 | return q->first_to_check; |
661 | 655 | ||
662 | switch (state) { | 656 | switch (state) { |
663 | case SLSB_P_OUTPUT_EMPTY: | 657 | case SLSB_P_OUTPUT_EMPTY: |
664 | /* the adapter got it */ | 658 | /* the adapter got it */ |
665 | QDIO_DBF_TEXT5(0, trace, "outpempt"); | 659 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count); |
666 | 660 | ||
667 | atomic_sub(count, &q->nr_buf_used); | 661 | atomic_sub(count, &q->nr_buf_used); |
668 | q->first_to_check = add_buf(q->first_to_check, count); | 662 | q->first_to_check = add_buf(q->first_to_check, count); |
@@ -674,14 +668,14 @@ check_next: | |||
674 | break; | 668 | break; |
675 | goto check_next; | 669 | goto check_next; |
676 | case SLSB_P_OUTPUT_ERROR: | 670 | case SLSB_P_OUTPUT_ERROR: |
677 | announce_buffer_error(q); | 671 | announce_buffer_error(q, count); |
678 | /* process the buffer, the upper layer will take care of it */ | 672 | /* process the buffer, the upper layer will take care of it */ |
679 | q->first_to_check = add_buf(q->first_to_check, count); | 673 | q->first_to_check = add_buf(q->first_to_check, count); |
680 | atomic_sub(count, &q->nr_buf_used); | 674 | atomic_sub(count, &q->nr_buf_used); |
681 | break; | 675 | break; |
682 | case SLSB_CU_OUTPUT_PRIMED: | 676 | case SLSB_CU_OUTPUT_PRIMED: |
683 | /* the adapter has not fetched the output yet */ | 677 | /* the adapter has not fetched the output yet */ |
684 | QDIO_DBF_TEXT5(0, trace, "outpprim"); | 678 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); |
685 | break; | 679 | break; |
686 | case SLSB_P_OUTPUT_NOT_INIT: | 680 | case SLSB_P_OUTPUT_NOT_INIT: |
687 | case SLSB_P_OUTPUT_HALTED: | 681 | case SLSB_P_OUTPUT_HALTED: |
@@ -706,99 +700,48 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) | |||
706 | 700 | ||
707 | if ((bufnr != q->last_move_ftc) || q->qdio_error) { | 701 | if ((bufnr != q->last_move_ftc) || q->qdio_error) { |
708 | q->last_move_ftc = bufnr; | 702 | q->last_move_ftc = bufnr; |
709 | QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); | 703 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); |
710 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
711 | return 1; | 704 | return 1; |
712 | } else | 705 | } else |
713 | return 0; | 706 | return 0; |
714 | } | 707 | } |
715 | 708 | ||
716 | /* | ||
717 | * VM could present us cc=2 and busy bit set on SIGA-write | ||
718 | * during reconfiguration of their Guest LAN (only in iqdio mode, | ||
719 | * otherwise qdio is asynchronous and cc=2 and busy bit there will take | ||
720 | * the queues down immediately). | ||
721 | * | ||
722 | * Therefore qdio_siga_output will try for a short time constantly, | ||
723 | * if such a condition occurs. If it doesn't change, it will | ||
724 | * increase the busy_siga_counter and save the timestamp, and | ||
725 | * schedule the queue for later processing. qdio_outbound_processing | ||
726 | * will check out the counter. If non-zero, it will call qdio_kick_outbound_q | ||
727 | * as often as the value of the counter. This will attempt further SIGA | ||
728 | * instructions. For each successful SIGA, the counter is | ||
729 | * decreased, for failing SIGAs the counter remains the same, after | ||
730 | * all. After some time of no movement, qdio_kick_outbound_q will | ||
731 | * finally fail and reflect corresponding error codes to call | ||
732 | * the upper layer module and have it take the queues down. | ||
733 | * | ||
734 | * Note that this is a change from the original HiperSockets design | ||
735 | * (saying cc=2 and busy bit means take the queues down), but in | ||
736 | * these days Guest LAN didn't exist... excessive cc=2 with busy bit | ||
737 | * conditions will still take the queues down, but the threshold is | ||
738 | * higher due to the Guest LAN environment. | ||
739 | * | ||
740 | * Called from outbound tasklet and do_QDIO handler. | ||
741 | */ | ||
742 | static void qdio_kick_outbound_q(struct qdio_q *q) | 709 | static void qdio_kick_outbound_q(struct qdio_q *q) |
743 | { | 710 | { |
744 | int rc; | 711 | unsigned int busy_bit; |
745 | #ifdef CONFIG_QDIO_DEBUG | 712 | int cc; |
746 | char dbf_text[15]; | ||
747 | |||
748 | QDIO_DBF_TEXT5(0, trace, "kickoutq"); | ||
749 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | ||
750 | #endif /* CONFIG_QDIO_DEBUG */ | ||
751 | 713 | ||
752 | if (!need_siga_out(q)) | 714 | if (!need_siga_out(q)) |
753 | return; | 715 | return; |
754 | 716 | ||
755 | rc = qdio_siga_output(q); | 717 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); |
756 | switch (rc) { | 718 | qdio_perf_stat_inc(&perf_stats.siga_out); |
719 | |||
720 | cc = qdio_siga_output(q, &busy_bit); | ||
721 | switch (cc) { | ||
757 | case 0: | 722 | case 0: |
758 | /* TODO: improve error handling for CC=0 case */ | ||
759 | #ifdef CONFIG_QDIO_DEBUG | ||
760 | if (q->u.out.timestamp) { | ||
761 | QDIO_DBF_TEXT3(0, trace, "cc2reslv"); | ||
762 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, | ||
763 | q->nr, | ||
764 | atomic_read(&q->u.out.busy_siga_counter)); | ||
765 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
766 | } | ||
767 | #endif /* CONFIG_QDIO_DEBUG */ | ||
768 | /* went smooth this time, reset timestamp */ | ||
769 | q->u.out.timestamp = 0; | ||
770 | break; | 723 | break; |
771 | /* cc=2 and busy bit */ | 724 | case 2: |
772 | case (2 | QDIO_ERROR_SIGA_BUSY): | 725 | if (busy_bit) { |
773 | atomic_inc(&q->u.out.busy_siga_counter); | 726 | DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); |
774 | 727 | q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; | |
775 | /* if the last siga was successful, save timestamp here */ | 728 | } else { |
776 | if (!q->u.out.timestamp) | 729 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", |
777 | q->u.out.timestamp = get_usecs(); | 730 | q->nr); |
778 | 731 | q->qdio_error = cc; | |
779 | /* if we're in time, don't touch qdio_error */ | ||
780 | if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { | ||
781 | tasklet_schedule(&q->tasklet); | ||
782 | break; | ||
783 | } | 732 | } |
784 | QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); | 733 | break; |
785 | #ifdef CONFIG_QDIO_DEBUG | 734 | case 1: |
786 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, | 735 | case 3: |
787 | atomic_read(&q->u.out.busy_siga_counter)); | 736 | DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); |
788 | QDIO_DBF_TEXT3(0, trace, dbf_text); | 737 | q->qdio_error = cc; |
789 | #endif /* CONFIG_QDIO_DEBUG */ | 738 | break; |
790 | default: | ||
791 | /* for plain cc=1, 2 or 3 */ | ||
792 | q->qdio_error = rc; | ||
793 | } | 739 | } |
794 | } | 740 | } |
795 | 741 | ||
796 | static void qdio_kick_outbound_handler(struct qdio_q *q) | 742 | static void qdio_kick_outbound_handler(struct qdio_q *q) |
797 | { | 743 | { |
798 | int start, end, count; | 744 | int start, end, count; |
799 | #ifdef CONFIG_QDIO_DEBUG | ||
800 | char dbf_text[15]; | ||
801 | #endif | ||
802 | 745 | ||
803 | start = q->first_to_kick; | 746 | start = q->first_to_kick; |
804 | end = q->last_move_ftc; | 747 | end = q->last_move_ftc; |
@@ -807,13 +750,8 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) | |||
807 | else | 750 | else |
808 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; | 751 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; |
809 | 752 | ||
810 | #ifdef CONFIG_QDIO_DEBUG | 753 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr); |
811 | QDIO_DBF_TEXT4(0, trace, "kickouth"); | 754 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count); |
812 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
813 | |||
814 | sprintf(dbf_text, "s=%2xc=%2x", start, count); | ||
815 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
816 | #endif /* CONFIG_QDIO_DEBUG */ | ||
817 | 755 | ||
818 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | 756 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
819 | return; | 757 | return; |
@@ -828,22 +766,18 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) | |||
828 | 766 | ||
829 | static void __qdio_outbound_processing(struct qdio_q *q) | 767 | static void __qdio_outbound_processing(struct qdio_q *q) |
830 | { | 768 | { |
831 | int siga_attempts; | 769 | unsigned long flags; |
832 | 770 | ||
833 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); | 771 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); |
834 | 772 | spin_lock_irqsave(&q->lock, flags); | |
835 | /* see comment in qdio_kick_outbound_q */ | ||
836 | siga_attempts = atomic_read(&q->u.out.busy_siga_counter); | ||
837 | while (siga_attempts--) { | ||
838 | atomic_dec(&q->u.out.busy_siga_counter); | ||
839 | qdio_kick_outbound_q(q); | ||
840 | } | ||
841 | 773 | ||
842 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); | 774 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); |
843 | 775 | ||
844 | if (qdio_outbound_q_moved(q)) | 776 | if (qdio_outbound_q_moved(q)) |
845 | qdio_kick_outbound_handler(q); | 777 | qdio_kick_outbound_handler(q); |
846 | 778 | ||
779 | spin_unlock_irqrestore(&q->lock, flags); | ||
780 | |||
847 | if (queue_type(q) == QDIO_ZFCP_QFMT) { | 781 | if (queue_type(q) == QDIO_ZFCP_QFMT) { |
848 | if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) | 782 | if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) |
849 | tasklet_schedule(&q->tasklet); | 783 | tasklet_schedule(&q->tasklet); |
@@ -908,27 +842,18 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
908 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, | 842 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, |
909 | enum qdio_irq_states state) | 843 | enum qdio_irq_states state) |
910 | { | 844 | { |
911 | #ifdef CONFIG_QDIO_DEBUG | 845 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); |
912 | char dbf_text[15]; | ||
913 | |||
914 | QDIO_DBF_TEXT5(0, trace, "newstate"); | ||
915 | sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); | ||
916 | QDIO_DBF_TEXT5(0, trace, dbf_text); | ||
917 | #endif /* CONFIG_QDIO_DEBUG */ | ||
918 | 846 | ||
919 | irq_ptr->state = state; | 847 | irq_ptr->state = state; |
920 | mb(); | 848 | mb(); |
921 | } | 849 | } |
922 | 850 | ||
923 | static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | 851 | static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) |
924 | { | 852 | { |
925 | char dbf_text[15]; | ||
926 | |||
927 | if (irb->esw.esw0.erw.cons) { | 853 | if (irb->esw.esw0.erw.cons) { |
928 | sprintf(dbf_text, "sens%4x", schid.sch_no); | 854 | DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); |
929 | QDIO_DBF_TEXT2(1, trace, dbf_text); | 855 | DBF_ERROR_HEX(irb, 64); |
930 | QDIO_DBF_HEX0(0, trace, irb, 64); | 856 | DBF_ERROR_HEX(irb->ecw, 64); |
931 | QDIO_DBF_HEX0(0, trace, irb->ecw, 64); | ||
932 | } | 857 | } |
933 | } | 858 | } |
934 | 859 | ||
@@ -962,14 +887,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, | |||
962 | { | 887 | { |
963 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 888 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
964 | struct qdio_q *q; | 889 | struct qdio_q *q; |
965 | char dbf_text[15]; | ||
966 | 890 | ||
967 | QDIO_DBF_TEXT2(1, trace, "ick2"); | 891 | DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); |
968 | sprintf(dbf_text, "%s", dev_name(&cdev->dev)); | 892 | DBF_ERROR("intp :%lx", intparm); |
969 | QDIO_DBF_TEXT2(1, trace, dbf_text); | 893 | DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); |
970 | QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); | ||
971 | QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); | ||
972 | QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); | ||
973 | 894 | ||
974 | if (irq_ptr->nr_input_qs) { | 895 | if (irq_ptr->nr_input_qs) { |
975 | q = irq_ptr->input_qs[0]; | 896 | q = irq_ptr->input_qs[0]; |
@@ -1022,28 +943,29 @@ static void qdio_int_error(struct ccw_device *cdev) | |||
1022 | } | 943 | } |
1023 | 944 | ||
1024 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, | 945 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, |
1025 | int dstat) | 946 | int dstat) |
1026 | { | 947 | { |
1027 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 948 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1028 | 949 | ||
1029 | if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { | 950 | if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { |
1030 | QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); | 951 | DBF_ERROR("EQ:ck con"); |
1031 | goto error; | 952 | goto error; |
1032 | } | 953 | } |
1033 | 954 | ||
1034 | if (!(dstat & DEV_STAT_DEV_END)) { | 955 | if (!(dstat & DEV_STAT_DEV_END)) { |
1035 | QDIO_DBF_TEXT2(1, setup, "eq:no de"); | 956 | DBF_ERROR("EQ:no dev"); |
1036 | goto error; | 957 | goto error; |
1037 | } | 958 | } |
1038 | 959 | ||
1039 | if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { | 960 | if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { |
1040 | QDIO_DBF_TEXT2(1, setup, "eq:badio"); | 961 | DBF_ERROR("EQ: bad io"); |
1041 | goto error; | 962 | goto error; |
1042 | } | 963 | } |
1043 | return 0; | 964 | return 0; |
1044 | error: | 965 | error: |
1045 | QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); | 966 | DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); |
1046 | QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); | 967 | DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); |
968 | |||
1047 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); | 969 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); |
1048 | return 1; | 970 | return 1; |
1049 | } | 971 | } |
@@ -1052,12 +974,8 @@ static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, | |||
1052 | int dstat) | 974 | int dstat) |
1053 | { | 975 | { |
1054 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 976 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1055 | char dbf_text[15]; | ||
1056 | |||
1057 | sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); | ||
1058 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1059 | QDIO_DBF_TEXT0(0, trace, dbf_text); | ||
1060 | 977 | ||
978 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); | ||
1061 | if (!qdio_establish_check_errors(cdev, cstat, dstat)) | 979 | if (!qdio_establish_check_errors(cdev, cstat, dstat)) |
1062 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); | 980 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); |
1063 | } | 981 | } |
@@ -1068,25 +986,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1068 | { | 986 | { |
1069 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 987 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1070 | int cstat, dstat; | 988 | int cstat, dstat; |
1071 | char dbf_text[15]; | ||
1072 | 989 | ||
1073 | qdio_perf_stat_inc(&perf_stats.qdio_int); | 990 | qdio_perf_stat_inc(&perf_stats.qdio_int); |
1074 | 991 | ||
1075 | if (!intparm || !irq_ptr) { | 992 | if (!intparm || !irq_ptr) { |
1076 | sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); | 993 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); |
1077 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1078 | return; | 994 | return; |
1079 | } | 995 | } |
1080 | 996 | ||
1081 | if (IS_ERR(irb)) { | 997 | if (IS_ERR(irb)) { |
1082 | switch (PTR_ERR(irb)) { | 998 | switch (PTR_ERR(irb)) { |
1083 | case -EIO: | 999 | case -EIO: |
1084 | sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); | 1000 | DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); |
1085 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1086 | return; | 1001 | return; |
1087 | case -ETIMEDOUT: | 1002 | case -ETIMEDOUT: |
1088 | sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); | 1003 | DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no); |
1089 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1090 | qdio_int_error(cdev); | 1004 | qdio_int_error(cdev); |
1091 | return; | 1005 | return; |
1092 | default: | 1006 | default: |
@@ -1094,7 +1008,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1094 | return; | 1008 | return; |
1095 | } | 1009 | } |
1096 | } | 1010 | } |
1097 | qdio_irq_check_sense(irq_ptr->schid, irb); | 1011 | qdio_irq_check_sense(irq_ptr, irb); |
1098 | 1012 | ||
1099 | cstat = irb->scsw.cmd.cstat; | 1013 | cstat = irb->scsw.cmd.cstat; |
1100 | dstat = irb->scsw.cmd.dstat; | 1014 | dstat = irb->scsw.cmd.dstat; |
@@ -1129,23 +1043,20 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1129 | /** | 1043 | /** |
1130 | * qdio_get_ssqd_desc - get qdio subchannel description | 1044 | * qdio_get_ssqd_desc - get qdio subchannel description |
1131 | * @cdev: ccw device to get description for | 1045 | * @cdev: ccw device to get description for |
1046 | * @data: where to store the ssqd | ||
1132 | * | 1047 | * |
1133 | * Returns a pointer to the saved qdio subchannel description, | 1048 | * Returns 0 or an error code. The results of the chsc are stored in the |
1134 | * or NULL for not setup qdio devices. | 1049 | * specified structure. |
1135 | */ | 1050 | */ |
1136 | struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) | 1051 | int qdio_get_ssqd_desc(struct ccw_device *cdev, |
1052 | struct qdio_ssqd_desc *data) | ||
1137 | { | 1053 | { |
1138 | struct qdio_irq *irq_ptr; | ||
1139 | char dbf_text[15]; | ||
1140 | |||
1141 | sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); | ||
1142 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1143 | 1054 | ||
1144 | irq_ptr = cdev->private->qdio_data; | 1055 | if (!cdev || !cdev->private) |
1145 | if (!irq_ptr) | 1056 | return -EINVAL; |
1146 | return NULL; | ||
1147 | 1057 | ||
1148 | return &irq_ptr->ssqd_desc; | 1058 | DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); |
1059 | return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); | ||
1149 | } | 1060 | } |
1150 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | 1061 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); |
1151 | 1062 | ||
@@ -1159,14 +1070,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | |||
1159 | */ | 1070 | */ |
1160 | int qdio_cleanup(struct ccw_device *cdev, int how) | 1071 | int qdio_cleanup(struct ccw_device *cdev, int how) |
1161 | { | 1072 | { |
1162 | struct qdio_irq *irq_ptr; | 1073 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1163 | char dbf_text[15]; | ||
1164 | int rc; | 1074 | int rc; |
1165 | 1075 | ||
1166 | sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); | ||
1167 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1168 | |||
1169 | irq_ptr = cdev->private->qdio_data; | ||
1170 | if (!irq_ptr) | 1076 | if (!irq_ptr) |
1171 | return -ENODEV; | 1077 | return -ENODEV; |
1172 | 1078 | ||
@@ -1199,18 +1105,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1199 | */ | 1105 | */ |
1200 | int qdio_shutdown(struct ccw_device *cdev, int how) | 1106 | int qdio_shutdown(struct ccw_device *cdev, int how) |
1201 | { | 1107 | { |
1202 | struct qdio_irq *irq_ptr; | 1108 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1203 | int rc; | 1109 | int rc; |
1204 | unsigned long flags; | 1110 | unsigned long flags; |
1205 | char dbf_text[15]; | ||
1206 | 1111 | ||
1207 | sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); | ||
1208 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1209 | |||
1210 | irq_ptr = cdev->private->qdio_data; | ||
1211 | if (!irq_ptr) | 1112 | if (!irq_ptr) |
1212 | return -ENODEV; | 1113 | return -ENODEV; |
1213 | 1114 | ||
1115 | DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); | ||
1116 | |||
1214 | mutex_lock(&irq_ptr->setup_mutex); | 1117 | mutex_lock(&irq_ptr->setup_mutex); |
1215 | /* | 1118 | /* |
1216 | * Subchannel was already shot down. We cannot prevent being called | 1119 | * Subchannel was already shot down. We cannot prevent being called |
@@ -1234,10 +1137,8 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1234 | /* default behaviour is halt */ | 1137 | /* default behaviour is halt */ |
1235 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); | 1138 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); |
1236 | if (rc) { | 1139 | if (rc) { |
1237 | sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); | 1140 | DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); |
1238 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 1141 | DBF_ERROR("rc:%4d", rc); |
1239 | sprintf(dbf_text, "rc=%d", rc); | ||
1240 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1241 | goto no_cleanup; | 1142 | goto no_cleanup; |
1242 | } | 1143 | } |
1243 | 1144 | ||
@@ -1271,17 +1172,18 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); | |||
1271 | */ | 1172 | */ |
1272 | int qdio_free(struct ccw_device *cdev) | 1173 | int qdio_free(struct ccw_device *cdev) |
1273 | { | 1174 | { |
1274 | struct qdio_irq *irq_ptr; | 1175 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1275 | char dbf_text[15]; | ||
1276 | |||
1277 | sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); | ||
1278 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1279 | 1176 | ||
1280 | irq_ptr = cdev->private->qdio_data; | ||
1281 | if (!irq_ptr) | 1177 | if (!irq_ptr) |
1282 | return -ENODEV; | 1178 | return -ENODEV; |
1283 | 1179 | ||
1180 | DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); | ||
1284 | mutex_lock(&irq_ptr->setup_mutex); | 1181 | mutex_lock(&irq_ptr->setup_mutex); |
1182 | |||
1183 | if (irq_ptr->debug_area != NULL) { | ||
1184 | debug_unregister(irq_ptr->debug_area); | ||
1185 | irq_ptr->debug_area = NULL; | ||
1186 | } | ||
1285 | cdev->private->qdio_data = NULL; | 1187 | cdev->private->qdio_data = NULL; |
1286 | mutex_unlock(&irq_ptr->setup_mutex); | 1188 | mutex_unlock(&irq_ptr->setup_mutex); |
1287 | 1189 | ||
@@ -1300,10 +1202,6 @@ EXPORT_SYMBOL_GPL(qdio_free); | |||
1300 | int qdio_initialize(struct qdio_initialize *init_data) | 1202 | int qdio_initialize(struct qdio_initialize *init_data) |
1301 | { | 1203 | { |
1302 | int rc; | 1204 | int rc; |
1303 | char dbf_text[15]; | ||
1304 | |||
1305 | sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); | ||
1306 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1307 | 1205 | ||
1308 | rc = qdio_allocate(init_data); | 1206 | rc = qdio_allocate(init_data); |
1309 | if (rc) | 1207 | if (rc) |
@@ -1323,10 +1221,8 @@ EXPORT_SYMBOL_GPL(qdio_initialize); | |||
1323 | int qdio_allocate(struct qdio_initialize *init_data) | 1221 | int qdio_allocate(struct qdio_initialize *init_data) |
1324 | { | 1222 | { |
1325 | struct qdio_irq *irq_ptr; | 1223 | struct qdio_irq *irq_ptr; |
1326 | char dbf_text[15]; | ||
1327 | 1224 | ||
1328 | sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); | 1225 | DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); |
1329 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1330 | 1226 | ||
1331 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1227 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1332 | (init_data->no_output_qs && !init_data->output_handler)) | 1228 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1340,16 +1236,13 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1340 | (!init_data->output_sbal_addr_array)) | 1236 | (!init_data->output_sbal_addr_array)) |
1341 | return -EINVAL; | 1237 | return -EINVAL; |
1342 | 1238 | ||
1343 | qdio_allocate_do_dbf(init_data); | ||
1344 | |||
1345 | /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ | 1239 | /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ |
1346 | irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1240 | irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1347 | if (!irq_ptr) | 1241 | if (!irq_ptr) |
1348 | goto out_err; | 1242 | goto out_err; |
1349 | QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); | ||
1350 | QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); | ||
1351 | 1243 | ||
1352 | mutex_init(&irq_ptr->setup_mutex); | 1244 | mutex_init(&irq_ptr->setup_mutex); |
1245 | qdio_allocate_dbf(init_data, irq_ptr); | ||
1353 | 1246 | ||
1354 | /* | 1247 | /* |
1355 | * Allocate a page for the chsc calls in qdio_establish. | 1248 | * Allocate a page for the chsc calls in qdio_establish. |
@@ -1367,9 +1260,6 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1367 | goto out_rel; | 1260 | goto out_rel; |
1368 | WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); | 1261 | WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); |
1369 | 1262 | ||
1370 | QDIO_DBF_TEXT0(0, setup, "qdr:"); | ||
1371 | QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); | ||
1372 | |||
1373 | if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, | 1263 | if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, |
1374 | init_data->no_output_qs)) | 1264 | init_data->no_output_qs)) |
1375 | goto out_rel; | 1265 | goto out_rel; |
@@ -1390,14 +1280,12 @@ EXPORT_SYMBOL_GPL(qdio_allocate); | |||
1390 | */ | 1280 | */ |
1391 | int qdio_establish(struct qdio_initialize *init_data) | 1281 | int qdio_establish(struct qdio_initialize *init_data) |
1392 | { | 1282 | { |
1393 | char dbf_text[20]; | ||
1394 | struct qdio_irq *irq_ptr; | 1283 | struct qdio_irq *irq_ptr; |
1395 | struct ccw_device *cdev = init_data->cdev; | 1284 | struct ccw_device *cdev = init_data->cdev; |
1396 | unsigned long saveflags; | 1285 | unsigned long saveflags; |
1397 | int rc; | 1286 | int rc; |
1398 | 1287 | ||
1399 | sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); | 1288 | DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); |
1400 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1401 | 1289 | ||
1402 | irq_ptr = cdev->private->qdio_data; | 1290 | irq_ptr = cdev->private->qdio_data; |
1403 | if (!irq_ptr) | 1291 | if (!irq_ptr) |
@@ -1427,10 +1315,8 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1427 | 1315 | ||
1428 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); | 1316 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); |
1429 | if (rc) { | 1317 | if (rc) { |
1430 | sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); | 1318 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); |
1431 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1319 | DBF_ERROR("rc:%4x", rc); |
1432 | sprintf(dbf_text, "eq:rc%4x", rc); | ||
1433 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1434 | } | 1320 | } |
1435 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | 1321 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); |
1436 | 1322 | ||
@@ -1451,10 +1337,8 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1451 | } | 1337 | } |
1452 | 1338 | ||
1453 | qdio_setup_ssqd_info(irq_ptr); | 1339 | qdio_setup_ssqd_info(irq_ptr); |
1454 | sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc); | 1340 | DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); |
1455 | QDIO_DBF_TEXT2(0, setup, dbf_text); | 1341 | DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); |
1456 | sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); | ||
1457 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
1458 | 1342 | ||
1459 | /* qebsm is now setup if available, initialize buffer states */ | 1343 | /* qebsm is now setup if available, initialize buffer states */ |
1460 | qdio_init_buf_states(irq_ptr); | 1344 | qdio_init_buf_states(irq_ptr); |
@@ -1475,10 +1359,8 @@ int qdio_activate(struct ccw_device *cdev) | |||
1475 | struct qdio_irq *irq_ptr; | 1359 | struct qdio_irq *irq_ptr; |
1476 | int rc; | 1360 | int rc; |
1477 | unsigned long saveflags; | 1361 | unsigned long saveflags; |
1478 | char dbf_text[20]; | ||
1479 | 1362 | ||
1480 | sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); | 1363 | DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); |
1481 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1482 | 1364 | ||
1483 | irq_ptr = cdev->private->qdio_data; | 1365 | irq_ptr = cdev->private->qdio_data; |
1484 | if (!irq_ptr) | 1366 | if (!irq_ptr) |
@@ -1504,10 +1386,8 @@ int qdio_activate(struct ccw_device *cdev) | |||
1504 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, | 1386 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, |
1505 | 0, DOIO_DENY_PREFETCH); | 1387 | 0, DOIO_DENY_PREFETCH); |
1506 | if (rc) { | 1388 | if (rc) { |
1507 | sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); | 1389 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); |
1508 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1390 | DBF_ERROR("rc:%4x", rc); |
1509 | sprintf(dbf_text, "aq:rc%4x", rc); | ||
1510 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1511 | } | 1391 | } |
1512 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | 1392 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); |
1513 | 1393 | ||
@@ -1565,23 +1445,38 @@ static inline int buf_in_between(int bufnr, int start, int count) | |||
1565 | static void handle_inbound(struct qdio_q *q, unsigned int callflags, | 1445 | static void handle_inbound(struct qdio_q *q, unsigned int callflags, |
1566 | int bufnr, int count) | 1446 | int bufnr, int count) |
1567 | { | 1447 | { |
1568 | unsigned long flags; | 1448 | int used, cc, diff; |
1569 | int used, rc; | ||
1570 | 1449 | ||
1571 | /* | 1450 | if (!q->u.in.polling) |
1572 | * do_QDIO could run in parallel with the queue tasklet so the | 1451 | goto set; |
1573 | * upper-layer programm could empty the ACK'ed buffer here. | 1452 | |
1574 | * If that happens we must clear the polling flag, otherwise | 1453 | /* protect against stop polling setting an ACK for an emptied slsb */ |
1575 | * qdio_stop_polling() could set the buffer to NOT_INIT after | 1454 | if (count == QDIO_MAX_BUFFERS_PER_Q) { |
1576 | * it was set to EMPTY which would kill us. | 1455 | /* overwriting everything, just delete polling status */ |
1577 | */ | 1456 | q->u.in.polling = 0; |
1578 | spin_lock_irqsave(&q->u.in.lock, flags); | 1457 | q->u.in.ack_count = 0; |
1579 | if (q->u.in.polling) | 1458 | goto set; |
1580 | if (buf_in_between(q->last_move_ftc, bufnr, count)) | 1459 | } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { |
1460 | if (is_qebsm(q)) { | ||
1461 | /* partial overwrite, just update last_move_ftc */ | ||
1462 | diff = add_buf(bufnr, count); | ||
1463 | diff = sub_buf(diff, q->last_move_ftc); | ||
1464 | q->u.in.ack_count -= diff; | ||
1465 | if (q->u.in.ack_count <= 0) { | ||
1466 | q->u.in.polling = 0; | ||
1467 | q->u.in.ack_count = 0; | ||
1468 | /* TODO: must we set last_move_ftc to something meaningful? */ | ||
1469 | goto set; | ||
1470 | } | ||
1471 | q->last_move_ftc = add_buf(q->last_move_ftc, diff); | ||
1472 | } | ||
1473 | else | ||
1474 | /* the only ACK will be deleted, so stop polling */ | ||
1581 | q->u.in.polling = 0; | 1475 | q->u.in.polling = 0; |
1476 | } | ||
1582 | 1477 | ||
1478 | set: | ||
1583 | count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); | 1479 | count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); |
1584 | spin_unlock_irqrestore(&q->u.in.lock, flags); | ||
1585 | 1480 | ||
1586 | used = atomic_add_return(count, &q->nr_buf_used) - count; | 1481 | used = atomic_add_return(count, &q->nr_buf_used) - count; |
1587 | BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); | 1482 | BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); |
@@ -1591,9 +1486,9 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags, | |||
1591 | return; | 1486 | return; |
1592 | 1487 | ||
1593 | if (need_siga_in(q)) { | 1488 | if (need_siga_in(q)) { |
1594 | rc = qdio_siga_input(q); | 1489 | cc = qdio_siga_input(q); |
1595 | if (rc) | 1490 | if (cc) |
1596 | q->qdio_error = rc; | 1491 | q->qdio_error = cc; |
1597 | } | 1492 | } |
1598 | } | 1493 | } |
1599 | 1494 | ||
@@ -1640,6 +1535,10 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1640 | while (count--) | 1535 | while (count--) |
1641 | qdio_kick_outbound_q(q); | 1536 | qdio_kick_outbound_q(q); |
1642 | } | 1537 | } |
1538 | |||
1539 | /* report CC=2 conditions synchronously */ | ||
1540 | if (q->qdio_error) | ||
1541 | __qdio_outbound_processing(q); | ||
1643 | goto out; | 1542 | goto out; |
1644 | } | 1543 | } |
1645 | 1544 | ||
@@ -1649,11 +1548,11 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1649 | } | 1548 | } |
1650 | 1549 | ||
1651 | /* try to fast requeue buffers */ | 1550 | /* try to fast requeue buffers */ |
1652 | get_buf_state(q, prev_buf(bufnr), &state); | 1551 | get_buf_state(q, prev_buf(bufnr), &state, 0); |
1653 | if (state != SLSB_CU_OUTPUT_PRIMED) | 1552 | if (state != SLSB_CU_OUTPUT_PRIMED) |
1654 | qdio_kick_outbound_q(q); | 1553 | qdio_kick_outbound_q(q); |
1655 | else { | 1554 | else { |
1656 | QDIO_DBF_TEXT5(0, trace, "fast-req"); | 1555 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); |
1657 | qdio_perf_stat_inc(&perf_stats.fast_requeue); | 1556 | qdio_perf_stat_inc(&perf_stats.fast_requeue); |
1658 | } | 1557 | } |
1659 | out: | 1558 | out: |
@@ -1673,12 +1572,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1673 | int q_nr, int bufnr, int count) | 1572 | int q_nr, int bufnr, int count) |
1674 | { | 1573 | { |
1675 | struct qdio_irq *irq_ptr; | 1574 | struct qdio_irq *irq_ptr; |
1676 | #ifdef CONFIG_QDIO_DEBUG | ||
1677 | char dbf_text[20]; | ||
1678 | |||
1679 | sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); | ||
1680 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1681 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1682 | 1575 | ||
1683 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || | 1576 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || |
1684 | (count > QDIO_MAX_BUFFERS_PER_Q) || | 1577 | (count > QDIO_MAX_BUFFERS_PER_Q) || |
@@ -1692,33 +1585,24 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1692 | if (!irq_ptr) | 1585 | if (!irq_ptr) |
1693 | return -ENODEV; | 1586 | return -ENODEV; |
1694 | 1587 | ||
1695 | #ifdef CONFIG_QDIO_DEBUG | ||
1696 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1588 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1697 | QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], | 1589 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input"); |
1698 | sizeof(void *)); | ||
1699 | else | 1590 | else |
1700 | QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], | 1591 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output"); |
1701 | sizeof(void *)); | 1592 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags); |
1702 | 1593 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count); | |
1703 | sprintf(dbf_text, "flag%04x", callflags); | ||
1704 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1705 | sprintf(dbf_text, "qi%02xct%02x", bufnr, count); | ||
1706 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1707 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1708 | 1594 | ||
1709 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) | 1595 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) |
1710 | return -EBUSY; | 1596 | return -EBUSY; |
1711 | 1597 | ||
1712 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1598 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1713 | handle_inbound(irq_ptr->input_qs[q_nr], | 1599 | handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, |
1714 | callflags, bufnr, count); | 1600 | count); |
1715 | else if (callflags & QDIO_FLAG_SYNC_OUTPUT) | 1601 | else if (callflags & QDIO_FLAG_SYNC_OUTPUT) |
1716 | handle_outbound(irq_ptr->output_qs[q_nr], | 1602 | handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, |
1717 | callflags, bufnr, count); | 1603 | count); |
1718 | else { | 1604 | else |
1719 | QDIO_DBF_TEXT3(1, trace, "doQD:inv"); | ||
1720 | return -EINVAL; | 1605 | return -EINVAL; |
1721 | } | ||
1722 | return 0; | 1606 | return 0; |
1723 | } | 1607 | } |
1724 | EXPORT_SYMBOL_GPL(do_QDIO); | 1608 | EXPORT_SYMBOL_GPL(do_QDIO); |
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c index ec5c4a414235..136d0f0b1e93 100644 --- a/drivers/s390/cio/qdio_perf.c +++ b/drivers/s390/cio/qdio_perf.c | |||
@@ -74,12 +74,20 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v) | |||
74 | seq_printf(m, "\n"); | 74 | seq_printf(m, "\n"); |
75 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", | 75 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", |
76 | (long)atomic_long_read(&perf_stats.fast_requeue)); | 76 | (long)atomic_long_read(&perf_stats.fast_requeue)); |
77 | seq_printf(m, "Number of outbound target full condition\t: %li\n", | ||
78 | (long)atomic_long_read(&perf_stats.outbound_target_full)); | ||
77 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", | 79 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", |
78 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); | 80 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); |
79 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", | 81 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", |
80 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); | 82 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); |
81 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", | 83 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", |
82 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); | 84 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); |
85 | seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", | ||
86 | (long)atomic_long_read(&perf_stats.debug_eqbs_all), | ||
87 | (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); | ||
88 | seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", | ||
89 | (long)atomic_long_read(&perf_stats.debug_sqbs_all), | ||
90 | (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); | ||
83 | seq_printf(m, "\n"); | 91 | seq_printf(m, "\n"); |
84 | return 0; | 92 | return 0; |
85 | } | 93 | } |
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h index 5c406a8b7387..7821ac4fa517 100644 --- a/drivers/s390/cio/qdio_perf.h +++ b/drivers/s390/cio/qdio_perf.h | |||
@@ -36,10 +36,15 @@ struct qdio_perf_stats { | |||
36 | atomic_long_t inbound_handler; | 36 | atomic_long_t inbound_handler; |
37 | atomic_long_t outbound_handler; | 37 | atomic_long_t outbound_handler; |
38 | atomic_long_t fast_requeue; | 38 | atomic_long_t fast_requeue; |
39 | atomic_long_t outbound_target_full; | ||
39 | 40 | ||
40 | /* for debugging */ | 41 | /* for debugging */ |
41 | atomic_long_t debug_tl_out_timer; | 42 | atomic_long_t debug_tl_out_timer; |
42 | atomic_long_t debug_stop_polling; | 43 | atomic_long_t debug_stop_polling; |
44 | atomic_long_t debug_eqbs_all; | ||
45 | atomic_long_t debug_eqbs_incomplete; | ||
46 | atomic_long_t debug_sqbs_all; | ||
47 | atomic_long_t debug_sqbs_incomplete; | ||
43 | }; | 48 | }; |
44 | 49 | ||
45 | extern struct qdio_perf_stats perf_stats; | 50 | extern struct qdio_perf_stats perf_stats; |
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index a0b6b46e7466..c08356b95bf5 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -117,17 +117,16 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, | |||
117 | q->mask = 1 << (31 - i); | 117 | q->mask = 1 << (31 - i); |
118 | q->nr = i; | 118 | q->nr = i; |
119 | q->handler = handler; | 119 | q->handler = handler; |
120 | spin_lock_init(&q->lock); | ||
120 | } | 121 | } |
121 | 122 | ||
122 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | 123 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, |
123 | void **sbals_array, char *dbf_text, int i) | 124 | void **sbals_array, int i) |
124 | { | 125 | { |
125 | struct qdio_q *prev; | 126 | struct qdio_q *prev; |
126 | int j; | 127 | int j; |
127 | 128 | ||
128 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 129 | DBF_HEX(&q, sizeof(void *)); |
129 | QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); | ||
130 | |||
131 | q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); | 130 | q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); |
132 | 131 | ||
133 | /* fill in sbal */ | 132 | /* fill in sbal */ |
@@ -150,31 +149,26 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | |||
150 | for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) | 149 | for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) |
151 | q->sl->element[j].sbal = (unsigned long)q->sbal[j]; | 150 | q->sl->element[j].sbal = (unsigned long)q->sbal[j]; |
152 | 151 | ||
153 | QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); | 152 | DBF_EVENT("sl-slsb-sbal"); |
154 | QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); | 153 | DBF_HEX(q->sl, sizeof(void *)); |
155 | QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); | 154 | DBF_HEX(&q->slsb, sizeof(void *)); |
156 | QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); | 155 | DBF_HEX(q->sbal, sizeof(void *)); |
157 | } | 156 | } |
158 | 157 | ||
159 | static void setup_queues(struct qdio_irq *irq_ptr, | 158 | static void setup_queues(struct qdio_irq *irq_ptr, |
160 | struct qdio_initialize *qdio_init) | 159 | struct qdio_initialize *qdio_init) |
161 | { | 160 | { |
162 | char dbf_text[20]; | ||
163 | struct qdio_q *q; | 161 | struct qdio_q *q; |
164 | void **input_sbal_array = qdio_init->input_sbal_addr_array; | 162 | void **input_sbal_array = qdio_init->input_sbal_addr_array; |
165 | void **output_sbal_array = qdio_init->output_sbal_addr_array; | 163 | void **output_sbal_array = qdio_init->output_sbal_addr_array; |
166 | int i; | 164 | int i; |
167 | 165 | ||
168 | sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no); | ||
169 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
170 | |||
171 | for_each_input_queue(irq_ptr, q, i) { | 166 | for_each_input_queue(irq_ptr, q, i) { |
172 | sprintf(dbf_text, "in-q%4x", i); | 167 | DBF_EVENT("in-q:%1d", i); |
173 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); | 168 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); |
174 | 169 | ||
175 | q->is_input_q = 1; | 170 | q->is_input_q = 1; |
176 | spin_lock_init(&q->u.in.lock); | 171 | setup_storage_lists(q, irq_ptr, input_sbal_array, i); |
177 | setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); | ||
178 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 172 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
179 | 173 | ||
180 | if (is_thinint_irq(irq_ptr)) | 174 | if (is_thinint_irq(irq_ptr)) |
@@ -186,12 +180,11 @@ static void setup_queues(struct qdio_irq *irq_ptr, | |||
186 | } | 180 | } |
187 | 181 | ||
188 | for_each_output_queue(irq_ptr, q, i) { | 182 | for_each_output_queue(irq_ptr, q, i) { |
189 | sprintf(dbf_text, "outq%4x", i); | 183 | DBF_EVENT("outq:%1d", i); |
190 | setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); | 184 | setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); |
191 | 185 | ||
192 | q->is_input_q = 0; | 186 | q->is_input_q = 0; |
193 | setup_storage_lists(q, irq_ptr, output_sbal_array, | 187 | setup_storage_lists(q, irq_ptr, output_sbal_array, i); |
194 | dbf_text, i); | ||
195 | output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 188 | output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
196 | 189 | ||
197 | tasklet_init(&q->tasklet, qdio_outbound_processing, | 190 | tasklet_init(&q->tasklet, qdio_outbound_processing, |
@@ -222,8 +215,6 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) | |||
222 | static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, | 215 | static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, |
223 | unsigned char qdioac, unsigned long token) | 216 | unsigned char qdioac, unsigned long token) |
224 | { | 217 | { |
225 | char dbf_text[15]; | ||
226 | |||
227 | if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) | 218 | if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) |
228 | goto no_qebsm; | 219 | goto no_qebsm; |
229 | if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || | 220 | if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || |
@@ -232,33 +223,41 @@ static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, | |||
232 | 223 | ||
233 | irq_ptr->sch_token = token; | 224 | irq_ptr->sch_token = token; |
234 | 225 | ||
235 | QDIO_DBF_TEXT0(0, setup, "V=V:1"); | 226 | DBF_EVENT("V=V:1"); |
236 | sprintf(dbf_text, "%8lx", irq_ptr->sch_token); | 227 | DBF_EVENT("%8lx", irq_ptr->sch_token); |
237 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
238 | return; | 228 | return; |
239 | 229 | ||
240 | no_qebsm: | 230 | no_qebsm: |
241 | irq_ptr->sch_token = 0; | 231 | irq_ptr->sch_token = 0; |
242 | irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; | 232 | irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; |
243 | QDIO_DBF_TEXT0(0, setup, "noV=V"); | 233 | DBF_EVENT("noV=V"); |
244 | } | 234 | } |
245 | 235 | ||
246 | static int __get_ssqd_info(struct qdio_irq *irq_ptr) | 236 | /* |
237 | * If there is a qdio_irq we use the chsc_page and store the information | ||
238 | * in the qdio_irq, otherwise we copy it to the specified structure. | ||
239 | */ | ||
240 | int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, | ||
241 | struct subchannel_id *schid, | ||
242 | struct qdio_ssqd_desc *data) | ||
247 | { | 243 | { |
248 | struct chsc_ssqd_area *ssqd; | 244 | struct chsc_ssqd_area *ssqd; |
249 | int rc; | 245 | int rc; |
250 | 246 | ||
251 | QDIO_DBF_TEXT0(0, setup, "getssqd"); | 247 | DBF_EVENT("getssqd:%4x", schid->sch_no); |
252 | ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; | 248 | if (irq_ptr != NULL) |
249 | ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; | ||
250 | else | ||
251 | ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); | ||
253 | memset(ssqd, 0, PAGE_SIZE); | 252 | memset(ssqd, 0, PAGE_SIZE); |
254 | 253 | ||
255 | ssqd->request = (struct chsc_header) { | 254 | ssqd->request = (struct chsc_header) { |
256 | .length = 0x0010, | 255 | .length = 0x0010, |
257 | .code = 0x0024, | 256 | .code = 0x0024, |
258 | }; | 257 | }; |
259 | ssqd->first_sch = irq_ptr->schid.sch_no; | 258 | ssqd->first_sch = schid->sch_no; |
260 | ssqd->last_sch = irq_ptr->schid.sch_no; | 259 | ssqd->last_sch = schid->sch_no; |
261 | ssqd->ssid = irq_ptr->schid.ssid; | 260 | ssqd->ssid = schid->ssid; |
262 | 261 | ||
263 | if (chsc(ssqd)) | 262 | if (chsc(ssqd)) |
264 | return -EIO; | 263 | return -EIO; |
@@ -268,27 +267,29 @@ static int __get_ssqd_info(struct qdio_irq *irq_ptr) | |||
268 | 267 | ||
269 | if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || | 268 | if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || |
270 | !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || | 269 | !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || |
271 | (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no)) | 270 | (ssqd->qdio_ssqd.sch != schid->sch_no)) |
272 | return -EINVAL; | 271 | return -EINVAL; |
273 | 272 | ||
274 | memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, | 273 | if (irq_ptr != NULL) |
275 | sizeof(struct qdio_ssqd_desc)); | 274 | memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, |
275 | sizeof(struct qdio_ssqd_desc)); | ||
276 | else { | ||
277 | memcpy(data, &ssqd->qdio_ssqd, | ||
278 | sizeof(struct qdio_ssqd_desc)); | ||
279 | free_page((unsigned long)ssqd); | ||
280 | } | ||
276 | return 0; | 281 | return 0; |
277 | } | 282 | } |
278 | 283 | ||
279 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | 284 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) |
280 | { | 285 | { |
281 | unsigned char qdioac; | 286 | unsigned char qdioac; |
282 | char dbf_text[15]; | ||
283 | int rc; | 287 | int rc; |
284 | 288 | ||
285 | rc = __get_ssqd_info(irq_ptr); | 289 | rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL); |
286 | if (rc) { | 290 | if (rc) { |
287 | QDIO_DBF_TEXT2(0, setup, "ssqdasig"); | 291 | DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); |
288 | sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no); | 292 | DBF_ERROR("rc:%x", rc); |
289 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
290 | sprintf(dbf_text, "rc:%d", rc); | ||
291 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
292 | /* all flags set, worst case */ | 293 | /* all flags set, worst case */ |
293 | qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | | 294 | qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | |
294 | AC1_SIGA_SYNC_NEEDED; | 295 | AC1_SIGA_SYNC_NEEDED; |
@@ -297,9 +298,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | |||
297 | 298 | ||
298 | check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); | 299 | check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); |
299 | process_ac_flags(irq_ptr, qdioac); | 300 | process_ac_flags(irq_ptr, qdioac); |
300 | 301 | DBF_EVENT("qdioac:%4x", qdioac); | |
301 | sprintf(dbf_text, "qdioac%2x", qdioac); | ||
302 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
303 | } | 302 | } |
304 | 303 | ||
305 | void qdio_release_memory(struct qdio_irq *irq_ptr) | 304 | void qdio_release_memory(struct qdio_irq *irq_ptr) |
@@ -419,7 +418,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
419 | /* get qdio commands */ | 418 | /* get qdio commands */ |
420 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); | 419 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); |
421 | if (!ciw) { | 420 | if (!ciw) { |
422 | QDIO_DBF_TEXT2(1, setup, "no eq"); | 421 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); |
423 | rc = -EINVAL; | 422 | rc = -EINVAL; |
424 | goto out_err; | 423 | goto out_err; |
425 | } | 424 | } |
@@ -427,7 +426,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
427 | 426 | ||
428 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); | 427 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); |
429 | if (!ciw) { | 428 | if (!ciw) { |
430 | QDIO_DBF_TEXT2(1, setup, "no aq"); | 429 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); |
431 | rc = -EINVAL; | 430 | rc = -EINVAL; |
432 | goto out_err; | 431 | goto out_err; |
433 | } | 432 | } |
@@ -447,56 +446,38 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 446 | { |
448 | char s[80]; | 447 | char s[80]; |
449 | 448 | ||
450 | sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); | 449 | snprintf(s, 80, "qdio: %s %s on SC %x using " |
451 | switch (irq_ptr->qib.qfmt) { | 450 | "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", |
452 | case QDIO_QETH_QFMT: | 451 | dev_name(&cdev->dev), |
453 | sprintf(s + strlen(s), "OSA "); | 452 | (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : |
454 | break; | 453 | ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), |
455 | case QDIO_ZFCP_QFMT: | 454 | irq_ptr->schid.sch_no, |
456 | sprintf(s + strlen(s), "ZFCP "); | 455 | is_thinint_irq(irq_ptr), |
457 | break; | 456 | (irq_ptr->sch_token) ? 1 : 0, |
458 | case QDIO_IQDIO_QFMT: | 457 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0, |
459 | sprintf(s + strlen(s), "HS "); | 458 | css_general_characteristics.aif_tdd, |
460 | break; | 459 | (irq_ptr->siga_flag.input) ? "R" : " ", |
461 | } | 460 | (irq_ptr->siga_flag.output) ? "W" : " ", |
462 | sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); | 461 | (irq_ptr->siga_flag.sync) ? "S" : " ", |
463 | sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); | 462 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", |
464 | sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); | 463 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", |
465 | sprintf(s + strlen(s), "PCI:%d ", | 464 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); |
466 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); | ||
467 | sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); | ||
468 | sprintf(s + strlen(s), "SIGA:"); | ||
469 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); | ||
470 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); | ||
471 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); | ||
472 | sprintf(s + strlen(s), "%s", | ||
473 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); | ||
474 | sprintf(s + strlen(s), "%s", | ||
475 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); | ||
476 | sprintf(s + strlen(s), "%s", | ||
477 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); | ||
478 | sprintf(s + strlen(s), "\n"); | ||
479 | printk(KERN_INFO "%s", s); | 465 | printk(KERN_INFO "%s", s); |
480 | } | 466 | } |
481 | 467 | ||
482 | int __init qdio_setup_init(void) | 468 | int __init qdio_setup_init(void) |
483 | { | 469 | { |
484 | char dbf_text[15]; | ||
485 | |||
486 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), | 470 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), |
487 | 256, 0, NULL); | 471 | 256, 0, NULL); |
488 | if (!qdio_q_cache) | 472 | if (!qdio_q_cache) |
489 | return -ENOMEM; | 473 | return -ENOMEM; |
490 | 474 | ||
491 | /* Check for OSA/FCP thin interrupts (bit 67). */ | 475 | /* Check for OSA/FCP thin interrupts (bit 67). */ |
492 | sprintf(dbf_text, "thini%1x", | 476 | DBF_EVENT("thinint:%1d", |
493 | (css_general_characteristics.aif_osa) ? 1 : 0); | 477 | (css_general_characteristics.aif_osa) ? 1 : 0); |
494 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
495 | 478 | ||
496 | /* Check for QEBSM support in general (bit 58). */ | 479 | /* Check for QEBSM support in general (bit 58). */ |
497 | sprintf(dbf_text, "cssQBS:%1x", | 480 | DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0); |
498 | (qebsm_possible()) ? 1 : 0); | ||
499 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
500 | return 0; | 481 | return 0; |
501 | } | 482 | } |
502 | 483 | ||
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index ea7f61400267..8e90e147b746 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
@@ -125,13 +125,13 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | |||
125 | 125 | ||
126 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) | 126 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) |
127 | { | 127 | { |
128 | unsigned char state; | 128 | unsigned char state = 0; |
129 | 129 | ||
130 | if (!atomic_read(&q->nr_buf_used)) | 130 | if (!atomic_read(&q->nr_buf_used)) |
131 | return 1; | 131 | return 1; |
132 | 132 | ||
133 | qdio_siga_sync_q(q); | 133 | qdio_siga_sync_q(q); |
134 | get_buf_state(q, q->first_to_check, &state); | 134 | get_buf_state(q, q->first_to_check, &state, 0); |
135 | 135 | ||
136 | if (state == SLSB_P_INPUT_PRIMED) | 136 | if (state == SLSB_P_INPUT_PRIMED) |
137 | /* more work coming */ | 137 | /* more work coming */ |
@@ -258,8 +258,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data) | |||
258 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | 258 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) |
259 | { | 259 | { |
260 | struct scssc_area *scssc_area; | 260 | struct scssc_area *scssc_area; |
261 | char dbf_text[15]; | ||
262 | void *ptr; | ||
263 | int rc; | 261 | int rc; |
264 | 262 | ||
265 | scssc_area = (struct scssc_area *)irq_ptr->chsc_page; | 263 | scssc_area = (struct scssc_area *)irq_ptr->chsc_page; |
@@ -294,19 +292,15 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | |||
294 | 292 | ||
295 | rc = chsc_error_from_response(scssc_area->response.code); | 293 | rc = chsc_error_from_response(scssc_area->response.code); |
296 | if (rc) { | 294 | if (rc) { |
297 | sprintf(dbf_text, "sidR%4x", scssc_area->response.code); | 295 | DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, |
298 | QDIO_DBF_TEXT1(0, trace, dbf_text); | 296 | scssc_area->response.code); |
299 | QDIO_DBF_TEXT1(0, setup, dbf_text); | 297 | DBF_ERROR_HEX(&scssc_area->response, sizeof(void *)); |
300 | ptr = &scssc_area->response; | ||
301 | QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); | ||
302 | return rc; | 298 | return rc; |
303 | } | 299 | } |
304 | 300 | ||
305 | QDIO_DBF_TEXT2(0, setup, "setscind"); | 301 | DBF_EVENT("setscind"); |
306 | QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, | 302 | DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long)); |
307 | sizeof(unsigned long)); | 303 | DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long)); |
308 | QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, | ||
309 | sizeof(unsigned long)); | ||
310 | return 0; | 304 | return 0; |
311 | } | 305 | } |
312 | 306 | ||
@@ -327,14 +321,11 @@ void tiqdio_free_memory(void) | |||
327 | 321 | ||
328 | int __init tiqdio_register_thinints(void) | 322 | int __init tiqdio_register_thinints(void) |
329 | { | 323 | { |
330 | char dbf_text[20]; | ||
331 | |||
332 | isc_register(QDIO_AIRQ_ISC); | 324 | isc_register(QDIO_AIRQ_ISC); |
333 | tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, | 325 | tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, |
334 | NULL, QDIO_AIRQ_ISC); | 326 | NULL, QDIO_AIRQ_ISC); |
335 | if (IS_ERR(tiqdio_alsi)) { | 327 | if (IS_ERR(tiqdio_alsi)) { |
336 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); | 328 | DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi)); |
337 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
338 | tiqdio_alsi = NULL; | 329 | tiqdio_alsi = NULL; |
339 | isc_unregister(QDIO_AIRQ_ISC); | 330 | isc_unregister(QDIO_AIRQ_ISC); |
340 | return -ENOMEM; | 331 | return -ENOMEM; |
@@ -360,7 +351,7 @@ void qdio_setup_thinint(struct qdio_irq *irq_ptr) | |||
360 | if (!is_thinint_irq(irq_ptr)) | 351 | if (!is_thinint_irq(irq_ptr)) |
361 | return; | 352 | return; |
362 | irq_ptr->dsci = get_indicator(); | 353 | irq_ptr->dsci = get_indicator(); |
363 | QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); | 354 | DBF_HEX(&irq_ptr->dsci, sizeof(void *)); |
364 | } | 355 | } |
365 | 356 | ||
366 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) | 357 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e3fe6838293a..1f5f5d2d87d9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
8 | * Felix Beck <felix.beck@de.ibm.com> | ||
8 | * | 9 | * |
9 | * Adjunct processor bus. | 10 | * Adjunct processor bus. |
10 | * | 11 | * |
@@ -23,6 +24,9 @@ | |||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
24 | */ | 25 | */ |
25 | 26 | ||
27 | #define KMSG_COMPONENT "ap" | ||
28 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
29 | |||
26 | #include <linux/module.h> | 30 | #include <linux/module.h> |
27 | #include <linux/init.h> | 31 | #include <linux/init.h> |
28 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
@@ -34,6 +38,10 @@ | |||
34 | #include <linux/mutex.h> | 38 | #include <linux/mutex.h> |
35 | #include <asm/s390_rdev.h> | 39 | #include <asm/s390_rdev.h> |
36 | #include <asm/reset.h> | 40 | #include <asm/reset.h> |
41 | #include <asm/airq.h> | ||
42 | #include <asm/atomic.h> | ||
43 | #include <asm/system.h> | ||
44 | #include <asm/isc.h> | ||
37 | #include <linux/hrtimer.h> | 45 | #include <linux/hrtimer.h> |
38 | #include <linux/ktime.h> | 46 | #include <linux/ktime.h> |
39 | 47 | ||
@@ -46,6 +54,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); | |||
46 | static int ap_poll_thread_start(void); | 54 | static int ap_poll_thread_start(void); |
47 | static void ap_poll_thread_stop(void); | 55 | static void ap_poll_thread_stop(void); |
48 | static void ap_request_timeout(unsigned long); | 56 | static void ap_request_timeout(unsigned long); |
57 | static inline void ap_schedule_poll_timer(void); | ||
49 | 58 | ||
50 | /* | 59 | /* |
51 | * Module description. | 60 | * Module description. |
@@ -68,7 +77,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000); | |||
68 | MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); | 77 | MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); |
69 | 78 | ||
70 | static struct device *ap_root_device = NULL; | 79 | static struct device *ap_root_device = NULL; |
71 | static DEFINE_SPINLOCK(ap_device_lock); | 80 | static DEFINE_SPINLOCK(ap_device_list_lock); |
72 | static LIST_HEAD(ap_device_list); | 81 | static LIST_HEAD(ap_device_list); |
73 | 82 | ||
74 | /* | 83 | /* |
@@ -80,19 +89,29 @@ static int ap_config_time = AP_CONFIG_TIME; | |||
80 | static DECLARE_WORK(ap_config_work, ap_scan_bus); | 89 | static DECLARE_WORK(ap_config_work, ap_scan_bus); |
81 | 90 | ||
82 | /* | 91 | /* |
83 | * Tasklet & timer for AP request polling. | 92 | * Tasklet & timer for AP request polling and interrupts |
84 | */ | 93 | */ |
85 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); | 94 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); |
86 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); | 95 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); |
87 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); | 96 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); |
88 | static struct task_struct *ap_poll_kthread = NULL; | 97 | static struct task_struct *ap_poll_kthread = NULL; |
89 | static DEFINE_MUTEX(ap_poll_thread_mutex); | 98 | static DEFINE_MUTEX(ap_poll_thread_mutex); |
99 | static void *ap_interrupt_indicator; | ||
90 | static struct hrtimer ap_poll_timer; | 100 | static struct hrtimer ap_poll_timer; |
91 | /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. | 101 | /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. |
92 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ | 102 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ |
93 | static unsigned long long poll_timeout = 250000; | 103 | static unsigned long long poll_timeout = 250000; |
94 | 104 | ||
95 | /** | 105 | /** |
106 | * ap_using_interrupts() - Returns non-zero if interrupt support is | ||
107 | * available. | ||
108 | */ | ||
109 | static inline int ap_using_interrupts(void) | ||
110 | { | ||
111 | return ap_interrupt_indicator != NULL; | ||
112 | } | ||
113 | |||
114 | /** | ||
96 | * ap_intructions_available() - Test if AP instructions are available. | 115 | * ap_intructions_available() - Test if AP instructions are available. |
97 | * | 116 | * |
98 | * Returns 0 if the AP instructions are installed. | 117 | * Returns 0 if the AP instructions are installed. |
@@ -113,6 +132,23 @@ static inline int ap_instructions_available(void) | |||
113 | } | 132 | } |
114 | 133 | ||
115 | /** | 134 | /** |
135 | * ap_interrupts_available(): Test if AP interrupts are available. | ||
136 | * | ||
137 | * Returns 1 if AP interrupts are available. | ||
138 | */ | ||
139 | static int ap_interrupts_available(void) | ||
140 | { | ||
141 | unsigned long long facility_bits[2]; | ||
142 | |||
143 | if (stfle(facility_bits, 2) <= 1) | ||
144 | return 0; | ||
145 | if (!(facility_bits[0] & (1ULL << 61)) || | ||
146 | !(facility_bits[1] & (1ULL << 62))) | ||
147 | return 0; | ||
148 | return 1; | ||
149 | } | ||
150 | |||
151 | /** | ||
116 | * ap_test_queue(): Test adjunct processor queue. | 152 | * ap_test_queue(): Test adjunct processor queue. |
117 | * @qid: The AP queue number | 153 | * @qid: The AP queue number |
118 | * @queue_depth: Pointer to queue depth value | 154 | * @queue_depth: Pointer to queue depth value |
@@ -152,6 +188,80 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) | |||
152 | return reg1; | 188 | return reg1; |
153 | } | 189 | } |
154 | 190 | ||
191 | #ifdef CONFIG_64BIT | ||
192 | /** | ||
193 | * ap_queue_interruption_control(): Enable interruption for a specific AP. | ||
194 | * @qid: The AP queue number | ||
195 | * @ind: The notification indicator byte | ||
196 | * | ||
197 | * Returns AP queue status. | ||
198 | */ | ||
199 | static inline struct ap_queue_status | ||
200 | ap_queue_interruption_control(ap_qid_t qid, void *ind) | ||
201 | { | ||
202 | register unsigned long reg0 asm ("0") = qid | 0x03000000UL; | ||
203 | register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; | ||
204 | register struct ap_queue_status reg1_out asm ("1"); | ||
205 | register void *reg2 asm ("2") = ind; | ||
206 | asm volatile( | ||
207 | ".long 0xb2af0000" /* PQAP(RAPQ) */ | ||
208 | : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) | ||
209 | : | ||
210 | : "cc" ); | ||
211 | return reg1_out; | ||
212 | } | ||
213 | #endif | ||
214 | |||
215 | /** | ||
216 | * ap_queue_enable_interruption(): Enable interruption on an AP. | ||
217 | * @qid: The AP queue number | ||
218 | * @ind: the notification indicator byte | ||
219 | * | ||
220 | * Enables interruption on AP queue via ap_queue_interruption_control(). Based | ||
221 | * on the return value it waits a while and tests the AP queue if interrupts | ||
222 | * have been switched on using ap_test_queue(). | ||
223 | */ | ||
224 | static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) | ||
225 | { | ||
226 | #ifdef CONFIG_64BIT | ||
227 | struct ap_queue_status status; | ||
228 | int t_depth, t_device_type, rc, i; | ||
229 | |||
230 | rc = -EBUSY; | ||
231 | status = ap_queue_interruption_control(qid, ind); | ||
232 | |||
233 | for (i = 0; i < AP_MAX_RESET; i++) { | ||
234 | switch (status.response_code) { | ||
235 | case AP_RESPONSE_NORMAL: | ||
236 | if (status.int_enabled) | ||
237 | return 0; | ||
238 | break; | ||
239 | case AP_RESPONSE_RESET_IN_PROGRESS: | ||
240 | case AP_RESPONSE_BUSY: | ||
241 | break; | ||
242 | case AP_RESPONSE_Q_NOT_AVAIL: | ||
243 | case AP_RESPONSE_DECONFIGURED: | ||
244 | case AP_RESPONSE_CHECKSTOPPED: | ||
245 | case AP_RESPONSE_INVALID_ADDRESS: | ||
246 | return -ENODEV; | ||
247 | case AP_RESPONSE_OTHERWISE_CHANGED: | ||
248 | if (status.int_enabled) | ||
249 | return 0; | ||
250 | break; | ||
251 | default: | ||
252 | break; | ||
253 | } | ||
254 | if (i < AP_MAX_RESET - 1) { | ||
255 | udelay(5); | ||
256 | status = ap_test_queue(qid, &t_depth, &t_device_type); | ||
257 | } | ||
258 | } | ||
259 | return rc; | ||
260 | #else | ||
261 | return -EINVAL; | ||
262 | #endif | ||
263 | } | ||
264 | |||
155 | /** | 265 | /** |
156 | * __ap_send(): Send message to adjunct processor queue. | 266 | * __ap_send(): Send message to adjunct processor queue. |
157 | * @qid: The AP queue number | 267 | * @qid: The AP queue number |
@@ -295,6 +405,11 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) | |||
295 | case AP_RESPONSE_CHECKSTOPPED: | 405 | case AP_RESPONSE_CHECKSTOPPED: |
296 | rc = -ENODEV; | 406 | rc = -ENODEV; |
297 | break; | 407 | break; |
408 | case AP_RESPONSE_INVALID_ADDRESS: | ||
409 | rc = -ENODEV; | ||
410 | break; | ||
411 | case AP_RESPONSE_OTHERWISE_CHANGED: | ||
412 | break; | ||
298 | case AP_RESPONSE_BUSY: | 413 | case AP_RESPONSE_BUSY: |
299 | break; | 414 | break; |
300 | default: | 415 | default: |
@@ -345,6 +460,15 @@ static int ap_init_queue(ap_qid_t qid) | |||
345 | status = ap_test_queue(qid, &dummy, &dummy); | 460 | status = ap_test_queue(qid, &dummy, &dummy); |
346 | } | 461 | } |
347 | } | 462 | } |
463 | if (rc == 0 && ap_using_interrupts()) { | ||
464 | rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator); | ||
465 | /* If interruption mode is supported by the machine, | ||
466 | * but an AP can not be enabled for interruption then | ||
467 | * the AP will be discarded. */ | ||
468 | if (rc) | ||
469 | pr_err("Registering adapter interrupts for " | ||
470 | "AP %d failed\n", AP_QID_DEVICE(qid)); | ||
471 | } | ||
348 | return rc; | 472 | return rc; |
349 | } | 473 | } |
350 | 474 | ||
@@ -397,16 +521,16 @@ static ssize_t ap_hwtype_show(struct device *dev, | |||
397 | struct ap_device *ap_dev = to_ap_dev(dev); | 521 | struct ap_device *ap_dev = to_ap_dev(dev); |
398 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); | 522 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); |
399 | } | 523 | } |
400 | static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); | ||
401 | 524 | ||
525 | static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); | ||
402 | static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, | 526 | static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, |
403 | char *buf) | 527 | char *buf) |
404 | { | 528 | { |
405 | struct ap_device *ap_dev = to_ap_dev(dev); | 529 | struct ap_device *ap_dev = to_ap_dev(dev); |
406 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); | 530 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); |
407 | } | 531 | } |
408 | static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); | ||
409 | 532 | ||
533 | static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); | ||
410 | static ssize_t ap_request_count_show(struct device *dev, | 534 | static ssize_t ap_request_count_show(struct device *dev, |
411 | struct device_attribute *attr, | 535 | struct device_attribute *attr, |
412 | char *buf) | 536 | char *buf) |
@@ -509,9 +633,9 @@ static int ap_device_probe(struct device *dev) | |||
509 | ap_dev->drv = ap_drv; | 633 | ap_dev->drv = ap_drv; |
510 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 634 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
511 | if (!rc) { | 635 | if (!rc) { |
512 | spin_lock_bh(&ap_device_lock); | 636 | spin_lock_bh(&ap_device_list_lock); |
513 | list_add(&ap_dev->list, &ap_device_list); | 637 | list_add(&ap_dev->list, &ap_device_list); |
514 | spin_unlock_bh(&ap_device_lock); | 638 | spin_unlock_bh(&ap_device_list_lock); |
515 | } | 639 | } |
516 | return rc; | 640 | return rc; |
517 | } | 641 | } |
@@ -553,9 +677,9 @@ static int ap_device_remove(struct device *dev) | |||
553 | 677 | ||
554 | ap_flush_queue(ap_dev); | 678 | ap_flush_queue(ap_dev); |
555 | del_timer_sync(&ap_dev->timeout); | 679 | del_timer_sync(&ap_dev->timeout); |
556 | spin_lock_bh(&ap_device_lock); | 680 | spin_lock_bh(&ap_device_list_lock); |
557 | list_del_init(&ap_dev->list); | 681 | list_del_init(&ap_dev->list); |
558 | spin_unlock_bh(&ap_device_lock); | 682 | spin_unlock_bh(&ap_device_list_lock); |
559 | if (ap_drv->remove) | 683 | if (ap_drv->remove) |
560 | ap_drv->remove(ap_dev); | 684 | ap_drv->remove(ap_dev); |
561 | spin_lock_bh(&ap_dev->lock); | 685 | spin_lock_bh(&ap_dev->lock); |
@@ -599,6 +723,14 @@ static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) | |||
599 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); | 723 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); |
600 | } | 724 | } |
601 | 725 | ||
726 | static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) | ||
727 | { | ||
728 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
729 | ap_using_interrupts() ? 1 : 0); | ||
730 | } | ||
731 | |||
732 | static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL); | ||
733 | |||
602 | static ssize_t ap_config_time_store(struct bus_type *bus, | 734 | static ssize_t ap_config_time_store(struct bus_type *bus, |
603 | const char *buf, size_t count) | 735 | const char *buf, size_t count) |
604 | { | 736 | { |
@@ -653,7 +785,8 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, | |||
653 | ktime_t hr_time; | 785 | ktime_t hr_time; |
654 | 786 | ||
655 | /* 120 seconds = maximum poll interval */ | 787 | /* 120 seconds = maximum poll interval */ |
656 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) | 788 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || |
789 | time > 120000000000ULL) | ||
657 | return -EINVAL; | 790 | return -EINVAL; |
658 | poll_timeout = time; | 791 | poll_timeout = time; |
659 | hr_time = ktime_set(0, poll_timeout); | 792 | hr_time = ktime_set(0, poll_timeout); |
@@ -672,6 +805,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { | |||
672 | &bus_attr_ap_domain, | 805 | &bus_attr_ap_domain, |
673 | &bus_attr_config_time, | 806 | &bus_attr_config_time, |
674 | &bus_attr_poll_thread, | 807 | &bus_attr_poll_thread, |
808 | &bus_attr_ap_interrupts, | ||
675 | &bus_attr_poll_timeout, | 809 | &bus_attr_poll_timeout, |
676 | NULL, | 810 | NULL, |
677 | }; | 811 | }; |
@@ -814,6 +948,11 @@ out: | |||
814 | return rc; | 948 | return rc; |
815 | } | 949 | } |
816 | 950 | ||
951 | static void ap_interrupt_handler(void *unused1, void *unused2) | ||
952 | { | ||
953 | tasklet_schedule(&ap_tasklet); | ||
954 | } | ||
955 | |||
817 | /** | 956 | /** |
818 | * __ap_scan_bus(): Scan the AP bus. | 957 | * __ap_scan_bus(): Scan the AP bus. |
819 | * @dev: Pointer to device | 958 | * @dev: Pointer to device |
@@ -928,6 +1067,8 @@ ap_config_timeout(unsigned long ptr) | |||
928 | */ | 1067 | */ |
929 | static inline void ap_schedule_poll_timer(void) | 1068 | static inline void ap_schedule_poll_timer(void) |
930 | { | 1069 | { |
1070 | if (ap_using_interrupts()) | ||
1071 | return; | ||
931 | if (hrtimer_is_queued(&ap_poll_timer)) | 1072 | if (hrtimer_is_queued(&ap_poll_timer)) |
932 | return; | 1073 | return; |
933 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), | 1074 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), |
@@ -1181,7 +1322,7 @@ static void ap_reset(struct ap_device *ap_dev) | |||
1181 | ap_dev->unregistered = 1; | 1322 | ap_dev->unregistered = 1; |
1182 | } | 1323 | } |
1183 | 1324 | ||
1184 | static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) | 1325 | static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) |
1185 | { | 1326 | { |
1186 | spin_lock(&ap_dev->lock); | 1327 | spin_lock(&ap_dev->lock); |
1187 | if (!ap_dev->unregistered) { | 1328 | if (!ap_dev->unregistered) { |
@@ -1207,13 +1348,19 @@ static void ap_poll_all(unsigned long dummy) | |||
1207 | unsigned long flags; | 1348 | unsigned long flags; |
1208 | struct ap_device *ap_dev; | 1349 | struct ap_device *ap_dev; |
1209 | 1350 | ||
1351 | /* Reset the indicator if interrupts are used. Thus new interrupts can | ||
1352 | * be received. Doing it in the beginning of the tasklet is therefor | ||
1353 | * important that no requests on any AP get lost. | ||
1354 | */ | ||
1355 | if (ap_using_interrupts()) | ||
1356 | xchg((u8 *)ap_interrupt_indicator, 0); | ||
1210 | do { | 1357 | do { |
1211 | flags = 0; | 1358 | flags = 0; |
1212 | spin_lock(&ap_device_lock); | 1359 | spin_lock(&ap_device_list_lock); |
1213 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1360 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1214 | __ap_poll_all(ap_dev, &flags); | 1361 | __ap_poll_device(ap_dev, &flags); |
1215 | } | 1362 | } |
1216 | spin_unlock(&ap_device_lock); | 1363 | spin_unlock(&ap_device_list_lock); |
1217 | } while (flags & 1); | 1364 | } while (flags & 1); |
1218 | if (flags & 2) | 1365 | if (flags & 2) |
1219 | ap_schedule_poll_timer(); | 1366 | ap_schedule_poll_timer(); |
@@ -1253,11 +1400,11 @@ static int ap_poll_thread(void *data) | |||
1253 | remove_wait_queue(&ap_poll_wait, &wait); | 1400 | remove_wait_queue(&ap_poll_wait, &wait); |
1254 | 1401 | ||
1255 | flags = 0; | 1402 | flags = 0; |
1256 | spin_lock_bh(&ap_device_lock); | 1403 | spin_lock_bh(&ap_device_list_lock); |
1257 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1404 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1258 | __ap_poll_all(ap_dev, &flags); | 1405 | __ap_poll_device(ap_dev, &flags); |
1259 | } | 1406 | } |
1260 | spin_unlock_bh(&ap_device_lock); | 1407 | spin_unlock_bh(&ap_device_list_lock); |
1261 | } | 1408 | } |
1262 | set_current_state(TASK_RUNNING); | 1409 | set_current_state(TASK_RUNNING); |
1263 | remove_wait_queue(&ap_poll_wait, &wait); | 1410 | remove_wait_queue(&ap_poll_wait, &wait); |
@@ -1268,6 +1415,8 @@ static int ap_poll_thread_start(void) | |||
1268 | { | 1415 | { |
1269 | int rc; | 1416 | int rc; |
1270 | 1417 | ||
1418 | if (ap_using_interrupts()) | ||
1419 | return 0; | ||
1271 | mutex_lock(&ap_poll_thread_mutex); | 1420 | mutex_lock(&ap_poll_thread_mutex); |
1272 | if (!ap_poll_kthread) { | 1421 | if (!ap_poll_kthread) { |
1273 | ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); | 1422 | ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); |
@@ -1301,8 +1450,12 @@ static void ap_request_timeout(unsigned long data) | |||
1301 | { | 1450 | { |
1302 | struct ap_device *ap_dev = (struct ap_device *) data; | 1451 | struct ap_device *ap_dev = (struct ap_device *) data; |
1303 | 1452 | ||
1304 | if (ap_dev->reset == AP_RESET_ARMED) | 1453 | if (ap_dev->reset == AP_RESET_ARMED) { |
1305 | ap_dev->reset = AP_RESET_DO; | 1454 | ap_dev->reset = AP_RESET_DO; |
1455 | |||
1456 | if (ap_using_interrupts()) | ||
1457 | tasklet_schedule(&ap_tasklet); | ||
1458 | } | ||
1306 | } | 1459 | } |
1307 | 1460 | ||
1308 | static void ap_reset_domain(void) | 1461 | static void ap_reset_domain(void) |
@@ -1337,14 +1490,25 @@ int __init ap_module_init(void) | |||
1337 | int rc, i; | 1490 | int rc, i; |
1338 | 1491 | ||
1339 | if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { | 1492 | if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { |
1340 | printk(KERN_WARNING "Invalid param: domain = %d. " | 1493 | pr_warning("%d is not a valid cryptographic domain\n", |
1341 | " Not loading.\n", ap_domain_index); | 1494 | ap_domain_index); |
1342 | return -EINVAL; | 1495 | return -EINVAL; |
1343 | } | 1496 | } |
1344 | if (ap_instructions_available() != 0) { | 1497 | if (ap_instructions_available() != 0) { |
1345 | printk(KERN_WARNING "AP instructions not installed.\n"); | 1498 | pr_warning("The hardware system does not support " |
1499 | "AP instructions\n"); | ||
1346 | return -ENODEV; | 1500 | return -ENODEV; |
1347 | } | 1501 | } |
1502 | if (ap_interrupts_available()) { | ||
1503 | isc_register(AP_ISC); | ||
1504 | ap_interrupt_indicator = s390_register_adapter_interrupt( | ||
1505 | &ap_interrupt_handler, NULL, AP_ISC); | ||
1506 | if (IS_ERR(ap_interrupt_indicator)) { | ||
1507 | ap_interrupt_indicator = NULL; | ||
1508 | isc_unregister(AP_ISC); | ||
1509 | } | ||
1510 | } | ||
1511 | |||
1348 | register_reset_call(&ap_reset_call); | 1512 | register_reset_call(&ap_reset_call); |
1349 | 1513 | ||
1350 | /* Create /sys/bus/ap. */ | 1514 | /* Create /sys/bus/ap. */ |
@@ -1408,6 +1572,10 @@ out_bus: | |||
1408 | bus_unregister(&ap_bus_type); | 1572 | bus_unregister(&ap_bus_type); |
1409 | out: | 1573 | out: |
1410 | unregister_reset_call(&ap_reset_call); | 1574 | unregister_reset_call(&ap_reset_call); |
1575 | if (ap_using_interrupts()) { | ||
1576 | s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); | ||
1577 | isc_unregister(AP_ISC); | ||
1578 | } | ||
1411 | return rc; | 1579 | return rc; |
1412 | } | 1580 | } |
1413 | 1581 | ||
@@ -1443,6 +1611,10 @@ void ap_module_exit(void) | |||
1443 | bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); | 1611 | bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); |
1444 | bus_unregister(&ap_bus_type); | 1612 | bus_unregister(&ap_bus_type); |
1445 | unregister_reset_call(&ap_reset_call); | 1613 | unregister_reset_call(&ap_reset_call); |
1614 | if (ap_using_interrupts()) { | ||
1615 | s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); | ||
1616 | isc_unregister(AP_ISC); | ||
1617 | } | ||
1446 | } | 1618 | } |
1447 | 1619 | ||
1448 | #ifndef CONFIG_ZCRYPT_MONOLITHIC | 1620 | #ifndef CONFIG_ZCRYPT_MONOLITHIC |
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 446378b308fc..a35362241805 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
8 | * Felix Beck <felix.beck@de.ibm.com> | ||
8 | * | 9 | * |
9 | * Adjunct processor bus header file. | 10 | * Adjunct processor bus header file. |
10 | * | 11 | * |
@@ -67,7 +68,8 @@ struct ap_queue_status { | |||
67 | unsigned int queue_empty : 1; | 68 | unsigned int queue_empty : 1; |
68 | unsigned int replies_waiting : 1; | 69 | unsigned int replies_waiting : 1; |
69 | unsigned int queue_full : 1; | 70 | unsigned int queue_full : 1; |
70 | unsigned int pad1 : 5; | 71 | unsigned int pad1 : 4; |
72 | unsigned int int_enabled : 1; | ||
71 | unsigned int response_code : 8; | 73 | unsigned int response_code : 8; |
72 | unsigned int pad2 : 16; | 74 | unsigned int pad2 : 16; |
73 | }; | 75 | }; |
@@ -78,6 +80,8 @@ struct ap_queue_status { | |||
78 | #define AP_RESPONSE_DECONFIGURED 0x03 | 80 | #define AP_RESPONSE_DECONFIGURED 0x03 |
79 | #define AP_RESPONSE_CHECKSTOPPED 0x04 | 81 | #define AP_RESPONSE_CHECKSTOPPED 0x04 |
80 | #define AP_RESPONSE_BUSY 0x05 | 82 | #define AP_RESPONSE_BUSY 0x05 |
83 | #define AP_RESPONSE_INVALID_ADDRESS 0x06 | ||
84 | #define AP_RESPONSE_OTHERWISE_CHANGED 0x07 | ||
81 | #define AP_RESPONSE_Q_FULL 0x10 | 85 | #define AP_RESPONSE_Q_FULL 0x10 |
82 | #define AP_RESPONSE_NO_PENDING_REPLY 0x10 | 86 | #define AP_RESPONSE_NO_PENDING_REPLY 0x10 |
83 | #define AP_RESPONSE_INDEX_TOO_BIG 0x11 | 87 | #define AP_RESPONSE_INDEX_TOO_BIG 0x11 |
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 54f4cbc3be9e..326ea08f67c9 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c | |||
@@ -264,17 +264,21 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev, | |||
264 | .type = TYPE82_RSP_CODE, | 264 | .type = TYPE82_RSP_CODE, |
265 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 265 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
266 | }; | 266 | }; |
267 | struct type80_hdr *t80h = reply->message; | 267 | struct type80_hdr *t80h; |
268 | int length; | 268 | int length; |
269 | 269 | ||
270 | /* Copy the reply message to the request message buffer. */ | 270 | /* Copy the reply message to the request message buffer. */ |
271 | if (IS_ERR(reply)) | 271 | if (IS_ERR(reply)) { |
272 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 272 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
273 | else if (t80h->type == TYPE80_RSP_CODE) { | 273 | goto out; |
274 | } | ||
275 | t80h = reply->message; | ||
276 | if (t80h->type == TYPE80_RSP_CODE) { | ||
274 | length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); | 277 | length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); |
275 | memcpy(msg->message, reply->message, length); | 278 | memcpy(msg->message, reply->message, length); |
276 | } else | 279 | } else |
277 | memcpy(msg->message, reply->message, sizeof error_reply); | 280 | memcpy(msg->message, reply->message, sizeof error_reply); |
281 | out: | ||
278 | complete((struct completion *) msg->private); | 282 | complete((struct completion *) msg->private); |
279 | } | 283 | } |
280 | 284 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 12da4815ba8e..17ba81b58c78 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
@@ -247,17 +247,21 @@ static void zcrypt_pcica_receive(struct ap_device *ap_dev, | |||
247 | .type = TYPE82_RSP_CODE, | 247 | .type = TYPE82_RSP_CODE, |
248 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 248 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
249 | }; | 249 | }; |
250 | struct type84_hdr *t84h = reply->message; | 250 | struct type84_hdr *t84h; |
251 | int length; | 251 | int length; |
252 | 252 | ||
253 | /* Copy the reply message to the request message buffer. */ | 253 | /* Copy the reply message to the request message buffer. */ |
254 | if (IS_ERR(reply)) | 254 | if (IS_ERR(reply)) { |
255 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 255 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
256 | else if (t84h->code == TYPE84_RSP_CODE) { | 256 | goto out; |
257 | } | ||
258 | t84h = reply->message; | ||
259 | if (t84h->code == TYPE84_RSP_CODE) { | ||
257 | length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); | 260 | length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); |
258 | memcpy(msg->message, reply->message, length); | 261 | memcpy(msg->message, reply->message, length); |
259 | } else | 262 | } else |
260 | memcpy(msg->message, reply->message, sizeof error_reply); | 263 | memcpy(msg->message, reply->message, sizeof error_reply); |
264 | out: | ||
261 | complete((struct completion *) msg->private); | 265 | complete((struct completion *) msg->private); |
262 | } | 266 | } |
263 | 267 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 779952cb19fc..f4b0c4795434 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
@@ -447,19 +447,23 @@ static void zcrypt_pcicc_receive(struct ap_device *ap_dev, | |||
447 | .type = TYPE82_RSP_CODE, | 447 | .type = TYPE82_RSP_CODE, |
448 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 448 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
449 | }; | 449 | }; |
450 | struct type86_reply *t86r = reply->message; | 450 | struct type86_reply *t86r; |
451 | int length; | 451 | int length; |
452 | 452 | ||
453 | /* Copy the reply message to the request message buffer. */ | 453 | /* Copy the reply message to the request message buffer. */ |
454 | if (IS_ERR(reply)) | 454 | if (IS_ERR(reply)) { |
455 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 455 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
456 | else if (t86r->hdr.type == TYPE86_RSP_CODE && | 456 | goto out; |
457 | } | ||
458 | t86r = reply->message; | ||
459 | if (t86r->hdr.type == TYPE86_RSP_CODE && | ||
457 | t86r->cprb.cprb_ver_id == 0x01) { | 460 | t86r->cprb.cprb_ver_id == 0x01) { |
458 | length = sizeof(struct type86_reply) + t86r->length - 2; | 461 | length = sizeof(struct type86_reply) + t86r->length - 2; |
459 | length = min(PCICC_MAX_RESPONSE_SIZE, length); | 462 | length = min(PCICC_MAX_RESPONSE_SIZE, length); |
460 | memcpy(msg->message, reply->message, length); | 463 | memcpy(msg->message, reply->message, length); |
461 | } else | 464 | } else |
462 | memcpy(msg->message, reply->message, sizeof error_reply); | 465 | memcpy(msg->message, reply->message, sizeof error_reply); |
466 | out: | ||
463 | complete((struct completion *) msg->private); | 467 | complete((struct completion *) msg->private); |
464 | } | 468 | } |
465 | 469 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index d8ad36f81540..e7a1e22e77ac 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -635,13 +635,16 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
635 | }; | 635 | }; |
636 | struct response_type *resp_type = | 636 | struct response_type *resp_type = |
637 | (struct response_type *) msg->private; | 637 | (struct response_type *) msg->private; |
638 | struct type86x_reply *t86r = reply->message; | 638 | struct type86x_reply *t86r; |
639 | int length; | 639 | int length; |
640 | 640 | ||
641 | /* Copy the reply message to the request message buffer. */ | 641 | /* Copy the reply message to the request message buffer. */ |
642 | if (IS_ERR(reply)) | 642 | if (IS_ERR(reply)) { |
643 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 643 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
644 | else if (t86r->hdr.type == TYPE86_RSP_CODE && | 644 | goto out; |
645 | } | ||
646 | t86r = reply->message; | ||
647 | if (t86r->hdr.type == TYPE86_RSP_CODE && | ||
645 | t86r->cprbx.cprb_ver_id == 0x02) { | 648 | t86r->cprbx.cprb_ver_id == 0x02) { |
646 | switch (resp_type->type) { | 649 | switch (resp_type->type) { |
647 | case PCIXCC_RESPONSE_TYPE_ICA: | 650 | case PCIXCC_RESPONSE_TYPE_ICA: |
@@ -660,6 +663,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
660 | } | 663 | } |
661 | } else | 664 | } else |
662 | memcpy(msg->message, reply->message, sizeof error_reply); | 665 | memcpy(msg->message, reply->message, sizeof error_reply); |
666 | out: | ||
663 | complete(&(resp_type->work)); | 667 | complete(&(resp_type->work)); |
664 | } | 668 | } |
665 | 669 | ||
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 42776550acfd..f29c7086fc19 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -13,6 +13,9 @@ | |||
13 | #undef DEBUGDATA | 13 | #undef DEBUGDATA |
14 | #undef DEBUGCCW | 14 | #undef DEBUGCCW |
15 | 15 | ||
16 | #define KMSG_COMPONENT "ctcm" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -190,21 +193,22 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); | |||
190 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) | 193 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) |
191 | { | 194 | { |
192 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 195 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
193 | "%s(%s): %s: %04x\n", | 196 | "%s(%s): %s: %04x\n", |
194 | CTCM_FUNTAIL, ch->id, msg, rc); | 197 | CTCM_FUNTAIL, ch->id, msg, rc); |
195 | switch (rc) { | 198 | switch (rc) { |
196 | case -EBUSY: | 199 | case -EBUSY: |
197 | ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg); | 200 | pr_info("%s: The communication peer is busy\n", |
201 | ch->id); | ||
198 | fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); | 202 | fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); |
199 | break; | 203 | break; |
200 | case -ENODEV: | 204 | case -ENODEV: |
201 | ctcm_pr_emerg("%s (%s): Invalid device called for IO\n", | 205 | pr_err("%s: The specified target device is not valid\n", |
202 | ch->id, msg); | 206 | ch->id); |
203 | fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); | 207 | fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); |
204 | break; | 208 | break; |
205 | default: | 209 | default: |
206 | ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n", | 210 | pr_err("An I/O operation resulted in error %04x\n", |
207 | ch->id, msg, rc); | 211 | rc); |
208 | fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); | 212 | fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); |
209 | } | 213 | } |
210 | } | 214 | } |
@@ -886,8 +890,15 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) | |||
886 | fsm_newstate(fi, CTC_STATE_RXERR); | 890 | fsm_newstate(fi, CTC_STATE_RXERR); |
887 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 891 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
888 | } | 892 | } |
889 | } else | 893 | } else { |
890 | ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name); | 894 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
895 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, | ||
896 | ctc_ch_event_names[event], fsm_getstate_str(fi)); | ||
897 | |||
898 | dev_warn(&dev->dev, | ||
899 | "Initialization failed with RX/TX init handshake " | ||
900 | "error %s\n", ctc_ch_event_names[event]); | ||
901 | } | ||
891 | } | 902 | } |
892 | 903 | ||
893 | /** | 904 | /** |
@@ -969,7 +980,9 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) | |||
969 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, | 980 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, |
970 | ctc_ch_event_names[event], fsm_getstate_str(fi)); | 981 | ctc_ch_event_names[event], fsm_getstate_str(fi)); |
971 | 982 | ||
972 | ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name); | 983 | dev_warn(&dev->dev, |
984 | "Initialization failed with RX/TX init handshake " | ||
985 | "error %s\n", ctc_ch_event_names[event]); | ||
973 | } | 986 | } |
974 | } | 987 | } |
975 | 988 | ||
@@ -2101,14 +2114,11 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) | |||
2101 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); | 2114 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); |
2102 | 2115 | ||
2103 | if (IS_MPC(priv)) { | 2116 | if (IS_MPC(priv)) { |
2104 | ctcm_pr_info("ctcm: %s Restarting Device and " | ||
2105 | "MPC Group in 5 seconds\n", | ||
2106 | dev->name); | ||
2107 | restart_timer = CTCM_TIME_1_SEC; | 2117 | restart_timer = CTCM_TIME_1_SEC; |
2108 | } else { | 2118 | } else { |
2109 | ctcm_pr_info("%s: Restarting\n", dev->name); | ||
2110 | restart_timer = CTCM_TIME_5_SEC; | 2119 | restart_timer = CTCM_TIME_5_SEC; |
2111 | } | 2120 | } |
2121 | dev_info(&dev->dev, "Restarting device\n"); | ||
2112 | 2122 | ||
2113 | dev_action_stop(fi, event, arg); | 2123 | dev_action_stop(fi, event, arg); |
2114 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); | 2124 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); |
@@ -2150,16 +2160,16 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) | |||
2150 | case DEV_STATE_STARTWAIT_RX: | 2160 | case DEV_STATE_STARTWAIT_RX: |
2151 | if (event == DEV_EVENT_RXUP) { | 2161 | if (event == DEV_EVENT_RXUP) { |
2152 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2162 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2153 | ctcm_pr_info("%s: connected with remote side\n", | 2163 | dev_info(&dev->dev, |
2154 | dev->name); | 2164 | "Connected with remote side\n"); |
2155 | ctcm_clear_busy(dev); | 2165 | ctcm_clear_busy(dev); |
2156 | } | 2166 | } |
2157 | break; | 2167 | break; |
2158 | case DEV_STATE_STARTWAIT_TX: | 2168 | case DEV_STATE_STARTWAIT_TX: |
2159 | if (event == DEV_EVENT_TXUP) { | 2169 | if (event == DEV_EVENT_TXUP) { |
2160 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2170 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2161 | ctcm_pr_info("%s: connected with remote side\n", | 2171 | dev_info(&dev->dev, |
2162 | dev->name); | 2172 | "Connected with remote side\n"); |
2163 | ctcm_clear_busy(dev); | 2173 | ctcm_clear_busy(dev); |
2164 | } | 2174 | } |
2165 | break; | 2175 | break; |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index a4e29836a2aa..2678573becec 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -21,6 +21,9 @@ | |||
21 | #undef DEBUGDATA | 21 | #undef DEBUGDATA |
22 | #undef DEBUGCCW | 22 | #undef DEBUGCCW |
23 | 23 | ||
24 | #define KMSG_COMPONENT "ctcm" | ||
25 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
26 | |||
24 | #include <linux/module.h> | 27 | #include <linux/module.h> |
25 | #include <linux/init.h> | 28 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
@@ -281,14 +284,16 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
281 | 284 | ||
282 | switch (PTR_ERR(irb)) { | 285 | switch (PTR_ERR(irb)) { |
283 | case -EIO: | 286 | case -EIO: |
284 | ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev)); | 287 | dev_err(&cdev->dev, |
288 | "An I/O-error occurred on the CTCM device\n"); | ||
285 | break; | 289 | break; |
286 | case -ETIMEDOUT: | 290 | case -ETIMEDOUT: |
287 | ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev)); | 291 | dev_err(&cdev->dev, |
292 | "An adapter hardware operation timed out\n"); | ||
288 | break; | 293 | break; |
289 | default: | 294 | default: |
290 | ctcm_pr_warn("unknown error %ld on device %s\n", | 295 | dev_err(&cdev->dev, |
291 | PTR_ERR(irb), dev_name(&cdev->dev)); | 296 | "An error occurred on the adapter hardware\n"); |
292 | } | 297 | } |
293 | return PTR_ERR(irb); | 298 | return PTR_ERR(irb); |
294 | } | 299 | } |
@@ -309,15 +314,17 @@ static inline void ccw_unit_check(struct channel *ch, __u8 sense) | |||
309 | if (sense & SNS0_INTERVENTION_REQ) { | 314 | if (sense & SNS0_INTERVENTION_REQ) { |
310 | if (sense & 0x01) { | 315 | if (sense & 0x01) { |
311 | if (ch->sense_rc != 0x01) { | 316 | if (ch->sense_rc != 0x01) { |
312 | ctcm_pr_debug("%s: Interface disc. or Sel. " | 317 | pr_notice( |
313 | "reset (remote)\n", ch->id); | 318 | "%s: The communication peer has " |
319 | "disconnected\n", ch->id); | ||
314 | ch->sense_rc = 0x01; | 320 | ch->sense_rc = 0x01; |
315 | } | 321 | } |
316 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); | 322 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); |
317 | } else { | 323 | } else { |
318 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { | 324 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { |
319 | ctcm_pr_debug("%s: System reset (remote)\n", | 325 | pr_notice( |
320 | ch->id); | 326 | "%s: The remote operating system is " |
327 | "not available\n", ch->id); | ||
321 | ch->sense_rc = SNS0_INTERVENTION_REQ; | 328 | ch->sense_rc = SNS0_INTERVENTION_REQ; |
322 | } | 329 | } |
323 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); | 330 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); |
@@ -1194,8 +1201,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1194 | 1201 | ||
1195 | /* Check for unsolicited interrupts. */ | 1202 | /* Check for unsolicited interrupts. */ |
1196 | if (cgdev == NULL) { | 1203 | if (cgdev == NULL) { |
1197 | ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n", | 1204 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR, |
1198 | cstat, dstat); | 1205 | "%s(%s) unsolicited irq: c-%02x d-%02x\n", |
1206 | CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat); | ||
1207 | dev_warn(&cdev->dev, | ||
1208 | "The adapter received a non-specific IRQ\n"); | ||
1199 | return; | 1209 | return; |
1200 | } | 1210 | } |
1201 | 1211 | ||
@@ -1207,31 +1217,34 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1207 | else if (priv->channel[WRITE]->cdev == cdev) | 1217 | else if (priv->channel[WRITE]->cdev == cdev) |
1208 | ch = priv->channel[WRITE]; | 1218 | ch = priv->channel[WRITE]; |
1209 | else { | 1219 | else { |
1210 | ctcm_pr_err("ctcm: Can't determine channel for interrupt, " | 1220 | dev_err(&cdev->dev, |
1211 | "device %s\n", dev_name(&cdev->dev)); | 1221 | "%s: Internal error: Can't determine channel for " |
1222 | "interrupt device %s\n", | ||
1223 | __func__, dev_name(&cdev->dev)); | ||
1224 | /* Explain: inconsistent internal structures */ | ||
1212 | return; | 1225 | return; |
1213 | } | 1226 | } |
1214 | 1227 | ||
1215 | dev = ch->netdev; | 1228 | dev = ch->netdev; |
1216 | if (dev == NULL) { | 1229 | if (dev == NULL) { |
1217 | ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", | 1230 | dev_err(&cdev->dev, |
1218 | __func__, dev_name(&cdev->dev), ch); | 1231 | "%s Internal error: net_device is NULL, ch = 0x%p\n", |
1232 | __func__, ch); | ||
1233 | /* Explain: inconsistent internal structures */ | ||
1219 | return; | 1234 | return; |
1220 | } | 1235 | } |
1221 | 1236 | ||
1222 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, | ||
1223 | "%s(%s): int. for %s: cstat=%02x dstat=%02x", | ||
1224 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); | ||
1225 | |||
1226 | /* Copy interruption response block. */ | 1237 | /* Copy interruption response block. */ |
1227 | memcpy(ch->irb, irb, sizeof(struct irb)); | 1238 | memcpy(ch->irb, irb, sizeof(struct irb)); |
1228 | 1239 | ||
1240 | /* Issue error message and return on subchannel error code */ | ||
1229 | if (irb->scsw.cmd.cstat) { | 1241 | if (irb->scsw.cmd.cstat) { |
1230 | /* Check for good subchannel return code, otherwise error message */ | ||
1231 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); | 1242 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); |
1232 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", | 1243 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
1233 | dev->name, ch->id, irb->scsw.cmd.cstat, | 1244 | "%s(%s): sub-ch check %s: cs=%02x ds=%02x", |
1234 | irb->scsw.cmd.dstat); | 1245 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); |
1246 | dev_warn(&cdev->dev, | ||
1247 | "A check occurred on the subchannel\n"); | ||
1235 | return; | 1248 | return; |
1236 | } | 1249 | } |
1237 | 1250 | ||
@@ -1239,7 +1252,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1239 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | 1252 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
1240 | if ((irb->ecw[0] & ch->sense_rc) == 0) | 1253 | if ((irb->ecw[0] & ch->sense_rc) == 0) |
1241 | /* print it only once */ | 1254 | /* print it only once */ |
1242 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, | 1255 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
1243 | "%s(%s): sense=%02x, ds=%02x", | 1256 | "%s(%s): sense=%02x, ds=%02x", |
1244 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); | 1257 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); |
1245 | ccw_unit_check(ch, irb->ecw[0]); | 1258 | ccw_unit_check(ch, irb->ecw[0]); |
@@ -1574,6 +1587,11 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1574 | 1587 | ||
1575 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); | 1588 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); |
1576 | 1589 | ||
1590 | dev_info(&dev->dev, | ||
1591 | "setup OK : r/w = %s/%s, protocol : %d\n", | ||
1592 | priv->channel[READ]->id, | ||
1593 | priv->channel[WRITE]->id, priv->protocol); | ||
1594 | |||
1577 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1595 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1578 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, | 1596 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, |
1579 | priv->channel[READ]->id, | 1597 | priv->channel[READ]->id, |
@@ -1687,7 +1705,7 @@ static void __exit ctcm_exit(void) | |||
1687 | { | 1705 | { |
1688 | unregister_cu3088_discipline(&ctcm_group_driver); | 1706 | unregister_cu3088_discipline(&ctcm_group_driver); |
1689 | ctcm_unregister_dbf_views(); | 1707 | ctcm_unregister_dbf_views(); |
1690 | ctcm_pr_info("CTCM driver unloaded\n"); | 1708 | pr_info("CTCM driver unloaded\n"); |
1691 | } | 1709 | } |
1692 | 1710 | ||
1693 | /* | 1711 | /* |
@@ -1695,7 +1713,7 @@ static void __exit ctcm_exit(void) | |||
1695 | */ | 1713 | */ |
1696 | static void print_banner(void) | 1714 | static void print_banner(void) |
1697 | { | 1715 | { |
1698 | printk(KERN_INFO "CTCM driver initialized\n"); | 1716 | pr_info("CTCM driver initialized\n"); |
1699 | } | 1717 | } |
1700 | 1718 | ||
1701 | /** | 1719 | /** |
@@ -1717,8 +1735,8 @@ static int __init ctcm_init(void) | |||
1717 | ret = register_cu3088_discipline(&ctcm_group_driver); | 1735 | ret = register_cu3088_discipline(&ctcm_group_driver); |
1718 | if (ret) { | 1736 | if (ret) { |
1719 | ctcm_unregister_dbf_views(); | 1737 | ctcm_unregister_dbf_views(); |
1720 | ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline " | 1738 | pr_err("%s / register_cu3088_discipline failed, ret = %d\n", |
1721 | "(rc = %d)\n", ret); | 1739 | __func__, ret); |
1722 | return ret; | 1740 | return ret; |
1723 | } | 1741 | } |
1724 | print_banner(); | 1742 | print_banner(); |
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index d77cce3fe4d4..d925e732b7d8 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h | |||
@@ -41,12 +41,6 @@ | |||
41 | #define LOG_FLAG_NOMEM 8 | 41 | #define LOG_FLAG_NOMEM 8 |
42 | 42 | ||
43 | #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) | 43 | #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) |
44 | #define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg) | ||
45 | #define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg) | ||
46 | #define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg) | ||
47 | #define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg) | ||
48 | #define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) | ||
49 | #define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg) | ||
50 | 44 | ||
51 | #define CTCM_PR_DEBUG(fmt, arg...) \ | 45 | #define CTCM_PR_DEBUG(fmt, arg...) \ |
52 | do { \ | 46 | do { \ |
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 19f5d5ed85e0..3db5f846bbf6 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -19,6 +19,9 @@ | |||
19 | #undef DEBUGDATA | 19 | #undef DEBUGDATA |
20 | #undef DEBUGCCW | 20 | #undef DEBUGCCW |
21 | 21 | ||
22 | #define KMSG_COMPONENT "ctcm" | ||
23 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
24 | |||
22 | #include <linux/module.h> | 25 | #include <linux/module.h> |
23 | #include <linux/init.h> | 26 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
@@ -386,7 +389,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) | |||
386 | if (grp->allocchan_callback_retries < 4) { | 389 | if (grp->allocchan_callback_retries < 4) { |
387 | if (grp->allochanfunc) | 390 | if (grp->allochanfunc) |
388 | grp->allochanfunc(grp->port_num, | 391 | grp->allochanfunc(grp->port_num, |
389 | grp->group_max_buflen); | 392 | grp->group_max_buflen); |
390 | } else { | 393 | } else { |
391 | /* there are problems...bail out */ | 394 | /* there are problems...bail out */ |
392 | /* there may be a state mismatch so restart */ | 395 | /* there may be a state mismatch so restart */ |
@@ -1232,8 +1235,9 @@ done: | |||
1232 | 1235 | ||
1233 | dev_kfree_skb_any(pskb); | 1236 | dev_kfree_skb_any(pskb); |
1234 | if (sendrc == NET_RX_DROP) { | 1237 | if (sendrc == NET_RX_DROP) { |
1235 | printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED" | 1238 | dev_warn(&dev->dev, |
1236 | " - PACKET DROPPED\n", dev->name, __func__); | 1239 | "The network backlog for %s is exceeded, " |
1240 | "packet dropped\n", __func__); |
1237 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1241 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1238 | } | 1242 | } |
1239 | 1243 | ||
@@ -1670,10 +1674,11 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) | |||
1670 | CTCM_FUNTAIL, ch->id); | 1674 | CTCM_FUNTAIL, ch->id); |
1671 | } | 1675 | } |
1672 | } | 1676 | } |
1673 | |||
1674 | done: | 1677 | done: |
1675 | if (rc) { | 1678 | if (rc) { |
1676 | ctcm_pr_info("ctcmpc : %s() failed\n", __func__); | 1679 | dev_warn(&dev->dev, |
1680 | "The XID used in the MPC protocol is not valid, " | ||
1681 | "rc = %d\n", rc); | ||
1677 | priv->xid->xid2_flag2 = 0x40; | 1682 | priv->xid->xid2_flag2 = 0x40; |
1678 | grp->saved_xid2->xid2_flag2 = 0x40; | 1683 | grp->saved_xid2->xid2_flag2 = 0x40; |
1679 | } | 1684 | } |
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index bb2d13721d34..8452bb052d68 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c | |||
@@ -10,6 +10,9 @@ | |||
10 | #undef DEBUGDATA | 10 | #undef DEBUGDATA |
11 | #undef DEBUGCCW | 11 | #undef DEBUGCCW |
12 | 12 | ||
13 | #define KMSG_COMPONENT "ctcm" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/sysfs.h> | 16 | #include <linux/sysfs.h> |
14 | #include "ctcm_main.h" | 17 | #include "ctcm_main.h" |
15 | 18 | ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index c7a036a5d7a6..acca6678cb2b 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -26,6 +26,9 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define KMSG_COMPONENT "lcs" | ||
30 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
31 | |||
29 | #include <linux/module.h> | 32 | #include <linux/module.h> |
30 | #include <linux/if.h> | 33 | #include <linux/if.h> |
31 | #include <linux/netdevice.h> | 34 | #include <linux/netdevice.h> |
@@ -54,8 +57,6 @@ | |||
54 | #error Cannot compile lcs.c without some net devices switched on. | 57 | #error Cannot compile lcs.c without some net devices switched on. |
55 | #endif | 58 | #endif |
56 | 59 | ||
57 | #define PRINTK_HEADER " lcs: " | ||
58 | |||
59 | /** | 60 | /** |
60 | * initialization string for output | 61 | * initialization string for output |
61 | */ | 62 | */ |
@@ -96,7 +97,7 @@ lcs_register_debug_facility(void) | |||
96 | lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); | 97 | lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); |
97 | lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); | 98 | lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); |
98 | if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { | 99 | if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { |
99 | PRINT_ERR("Not enough memory for debug facility.\n"); | 100 | pr_err("Not enough memory for debug facility.\n"); |
100 | lcs_unregister_debug_facility(); | 101 | lcs_unregister_debug_facility(); |
101 | return -ENOMEM; | 102 | return -ENOMEM; |
102 | } | 103 | } |
@@ -503,7 +504,9 @@ lcs_start_channel(struct lcs_channel *channel) | |||
503 | if (rc) { | 504 | if (rc) { |
504 | LCS_DBF_TEXT_(4,trace,"essh%s", | 505 | LCS_DBF_TEXT_(4,trace,"essh%s", |
505 | dev_name(&channel->ccwdev->dev)); | 506 | dev_name(&channel->ccwdev->dev)); |
506 | PRINT_ERR("Error in starting channel, rc=%d!\n", rc); | 507 | dev_err(&channel->ccwdev->dev, |
508 | "Starting an LCS device resulted in an error," | ||
509 | " rc=%d!\n", rc); | ||
507 | } | 510 | } |
508 | return rc; | 511 | return rc; |
509 | } | 512 | } |
@@ -640,7 +643,9 @@ __lcs_resume_channel(struct lcs_channel *channel) | |||
640 | if (rc) { | 643 | if (rc) { |
641 | LCS_DBF_TEXT_(4, trace, "ersc%s", | 644 | LCS_DBF_TEXT_(4, trace, "ersc%s", |
642 | dev_name(&channel->ccwdev->dev)); | 645 | dev_name(&channel->ccwdev->dev)); |
643 | PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); | 646 | dev_err(&channel->ccwdev->dev, |
647 | "Sending data from the LCS device to the LAN failed" | ||
648 | " with rc=%d\n",rc); | ||
644 | } else | 649 | } else |
645 | channel->state = LCS_CH_STATE_RUNNING; | 650 | channel->state = LCS_CH_STATE_RUNNING; |
646 | return rc; | 651 | return rc; |
@@ -1086,7 +1091,7 @@ lcs_check_multicast_support(struct lcs_card *card) | |||
1086 | cmd->cmd.lcs_qipassist.num_ip_pairs = 1; | 1091 | cmd->cmd.lcs_qipassist.num_ip_pairs = 1; |
1087 | rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); | 1092 | rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); |
1088 | if (rc != 0) { | 1093 | if (rc != 0) { |
1089 | PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n"); | 1094 | pr_err("Query IPAssist failed. Assuming unsupported!\n"); |
1090 | return -EOPNOTSUPP; | 1095 | return -EOPNOTSUPP; |
1091 | } | 1096 | } |
1092 | if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) | 1097 | if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) |
@@ -1119,8 +1124,8 @@ list_modified: | |||
1119 | rc = lcs_send_setipm(card, ipm); | 1124 | rc = lcs_send_setipm(card, ipm); |
1120 | spin_lock_irqsave(&card->ipm_lock, flags); | 1125 | spin_lock_irqsave(&card->ipm_lock, flags); |
1121 | if (rc) { | 1126 | if (rc) { |
1122 | PRINT_INFO("Adding multicast address failed. " | 1127 | pr_info("Adding multicast address failed." |
1123 | "Table possibly full!\n"); | 1128 | " Table possibly full!\n"); |
1124 | /* store ipm in failed list -> will be added | 1129 | /* store ipm in failed list -> will be added |
1125 | * to ipm_list again, so a retry will be done | 1130 | * to ipm_list again, so a retry will be done |
1126 | * during the next call of this function */ | 1131 | * during the next call of this function */ |
@@ -1231,8 +1236,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) | |||
1231 | ipm = (struct lcs_ipm_list *) | 1236 | ipm = (struct lcs_ipm_list *) |
1232 | kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); | 1237 | kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); |
1233 | if (ipm == NULL) { | 1238 | if (ipm == NULL) { |
1234 | PRINT_INFO("Not enough memory to add " | 1239 | pr_info("Not enough memory to add" |
1235 | "new multicast entry!\n"); | 1240 | " new multicast entry!\n"); |
1236 | break; | 1241 | break; |
1237 | } | 1242 | } |
1238 | memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); | 1243 | memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); |
@@ -1306,18 +1311,21 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
1306 | 1311 | ||
1307 | switch (PTR_ERR(irb)) { | 1312 | switch (PTR_ERR(irb)) { |
1308 | case -EIO: | 1313 | case -EIO: |
1309 | PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); | 1314 | dev_warn(&cdev->dev, |
1315 | "An I/O-error occurred on the LCS device\n"); | ||
1310 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1316 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1311 | LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); | 1317 | LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); |
1312 | break; | 1318 | break; |
1313 | case -ETIMEDOUT: | 1319 | case -ETIMEDOUT: |
1314 | PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); | 1320 | dev_warn(&cdev->dev, |
1321 | "A command timed out on the LCS device\n"); | ||
1315 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1322 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1316 | LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); | 1323 | LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); |
1317 | break; | 1324 | break; |
1318 | default: | 1325 | default: |
1319 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 1326 | dev_warn(&cdev->dev, |
1320 | dev_name(&cdev->dev)); | 1327 | "An error occurred on the LCS device, rc=%ld\n", |
1328 | PTR_ERR(irb)); | ||
1321 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1329 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1322 | LCS_DBF_TEXT(2, trace, " rc???"); | 1330 | LCS_DBF_TEXT(2, trace, " rc???"); |
1323 | } | 1331 | } |
@@ -1403,8 +1411,10 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1403 | /* Check for channel and device errors presented */ | 1411 | /* Check for channel and device errors presented */ |
1404 | rc = lcs_get_problem(cdev, irb); | 1412 | rc = lcs_get_problem(cdev, irb); |
1405 | if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { | 1413 | if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { |
1406 | PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", | 1414 | dev_warn(&cdev->dev, |
1407 | dev_name(&cdev->dev), dstat, cstat); | 1415 | "The LCS device stopped because of an error," |
1416 | " dstat=0x%X, cstat=0x%X\n", |
1417 | dstat, cstat); | ||
1408 | if (rc) { | 1418 | if (rc) { |
1409 | channel->state = LCS_CH_STATE_ERROR; | 1419 | channel->state = LCS_CH_STATE_ERROR; |
1410 | } | 1420 | } |
@@ -1761,8 +1771,8 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) | |||
1761 | lcs_schedule_recovery(card); | 1771 | lcs_schedule_recovery(card); |
1762 | break; | 1772 | break; |
1763 | case LCS_CMD_STOPLAN: | 1773 | case LCS_CMD_STOPLAN: |
1764 | PRINT_WARN("Stoplan for %s initiated by LGW.\n", | 1774 | pr_warning("Stoplan for %s initiated by LGW.\n", |
1765 | card->dev->name); | 1775 | card->dev->name); |
1766 | if (card->dev) | 1776 | if (card->dev) |
1767 | netif_carrier_off(card->dev); | 1777 | netif_carrier_off(card->dev); |
1768 | break; | 1778 | break; |
@@ -1790,7 +1800,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) | |||
1790 | 1800 | ||
1791 | skb = dev_alloc_skb(skb_len); | 1801 | skb = dev_alloc_skb(skb_len); |
1792 | if (skb == NULL) { | 1802 | if (skb == NULL) { |
1793 | PRINT_ERR("LCS: alloc_skb failed for device=%s\n", | 1803 | dev_err(&card->dev->dev, |
1804 | " Allocating a socket buffer to interface %s failed\n", | ||
1794 | card->dev->name); | 1805 | card->dev->name); |
1795 | card->stats.rx_dropped++; | 1806 | card->stats.rx_dropped++; |
1796 | return; | 1807 | return; |
@@ -1886,7 +1897,8 @@ lcs_stop_device(struct net_device *dev) | |||
1886 | (card->write.state != LCS_CH_STATE_RUNNING)); | 1897 | (card->write.state != LCS_CH_STATE_RUNNING)); |
1887 | rc = lcs_stopcard(card); | 1898 | rc = lcs_stopcard(card); |
1888 | if (rc) | 1899 | if (rc) |
1889 | PRINT_ERR("Try it again!\n "); | 1900 | dev_err(&card->dev->dev, |
1901 | "Shutting down the LCS device failed\n");
1890 | return rc; | 1902 | return rc; |
1891 | } | 1903 | } |
1892 | 1904 | ||
@@ -1905,7 +1917,7 @@ lcs_open_device(struct net_device *dev) | |||
1905 | /* initialize statistics */ | 1917 | /* initialize statistics */ |
1906 | rc = lcs_detect(card); | 1918 | rc = lcs_detect(card); |
1907 | if (rc) { | 1919 | if (rc) { |
1908 | PRINT_ERR("LCS:Error in opening device!\n"); | 1920 | pr_err("Error in opening device!\n"); |
1909 | 1921 | ||
1910 | } else { | 1922 | } else { |
1911 | dev->flags |= IFF_UP; | 1923 | dev->flags |= IFF_UP; |
@@ -2113,8 +2125,9 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2113 | rc = lcs_detect(card); | 2125 | rc = lcs_detect(card); |
2114 | if (rc) { | 2126 | if (rc) { |
2115 | LCS_DBF_TEXT(2, setup, "dtctfail"); | 2127 | LCS_DBF_TEXT(2, setup, "dtctfail"); |
2116 | PRINT_WARN("Detection of LCS card failed with return code " | 2128 | dev_err(&card->dev->dev, |
2117 | "%d (0x%x)\n", rc, rc); | 2129 | "Detecting a network adapter for LCS devices" |
2130 | " failed with rc=%d (0x%x)\n", rc, rc); | ||
2118 | lcs_stopcard(card); | 2131 | lcs_stopcard(card); |
2119 | goto out; | 2132 | goto out; |
2120 | } | 2133 | } |
@@ -2144,7 +2157,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2144 | #endif | 2157 | #endif |
2145 | default: | 2158 | default: |
2146 | LCS_DBF_TEXT(3, setup, "errinit"); | 2159 | LCS_DBF_TEXT(3, setup, "errinit"); |
2147 | PRINT_ERR("LCS: Initialization failed\n"); | 2160 | pr_err("Initialization failed\n"); |
2148 | goto out; | 2161 | goto out; |
2149 | } | 2162 | } |
2150 | if (!dev) | 2163 | if (!dev) |
@@ -2176,13 +2189,13 @@ netdev_out: | |||
2176 | goto out; | 2189 | goto out; |
2177 | 2190 | ||
2178 | /* Print out supported assists: IPv6 */ | 2191 | /* Print out supported assists: IPv6 */ |
2179 | PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name, | 2192 | pr_info("LCS device %s %s IPv6 support\n", card->dev->name, |
2180 | (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? | 2193 | (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? |
2181 | "with" : "without"); | 2194 | "with" : "without"); |
2182 | /* Print out supported assist: Multicast */ | 2195 | /* Print out supported assist: Multicast */ |
2183 | PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name, | 2196 | pr_info("LCS device %s %s Multicast support\n", card->dev->name, |
2184 | (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? | 2197 | (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? |
2185 | "with" : "without"); | 2198 | "with" : "without"); |
2186 | return 0; | 2199 | return 0; |
2187 | out: | 2200 | out: |
2188 | 2201 | ||
@@ -2248,15 +2261,16 @@ lcs_recovery(void *ptr) | |||
2248 | return 0; | 2261 | return 0; |
2249 | LCS_DBF_TEXT(4, trace, "recover2"); | 2262 | LCS_DBF_TEXT(4, trace, "recover2"); |
2250 | gdev = card->gdev; | 2263 | gdev = card->gdev; |
2251 | PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev)); | 2264 | dev_warn(&gdev->dev, |
2265 | "A recovery process has been started for the LCS device\n"); | ||
2252 | rc = __lcs_shutdown_device(gdev, 1); | 2266 | rc = __lcs_shutdown_device(gdev, 1); |
2253 | rc = lcs_new_device(gdev); | 2267 | rc = lcs_new_device(gdev); |
2254 | if (!rc) | 2268 | if (!rc) |
2255 | PRINT_INFO("Device %s successfully recovered!\n", | 2269 | pr_info("Device %s successfully recovered!\n", |
2256 | card->dev->name); | 2270 | card->dev->name); |
2257 | else | 2271 | else |
2258 | PRINT_INFO("Device %s could not be recovered!\n", | 2272 | pr_info("Device %s could not be recovered!\n", |
2259 | card->dev->name); | 2273 | card->dev->name); |
2260 | lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); | 2274 | lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); |
2261 | return 0; | 2275 | return 0; |
2262 | } | 2276 | } |
@@ -2308,17 +2322,17 @@ __init lcs_init_module(void) | |||
2308 | { | 2322 | { |
2309 | int rc; | 2323 | int rc; |
2310 | 2324 | ||
2311 | PRINT_INFO("Loading %s\n",version); | 2325 | pr_info("Loading %s\n", version); |
2312 | rc = lcs_register_debug_facility(); | 2326 | rc = lcs_register_debug_facility(); |
2313 | LCS_DBF_TEXT(0, setup, "lcsinit"); | 2327 | LCS_DBF_TEXT(0, setup, "lcsinit"); |
2314 | if (rc) { | 2328 | if (rc) { |
2315 | PRINT_ERR("Initialization failed\n"); | 2329 | pr_err("Initialization failed\n"); |
2316 | return rc; | 2330 | return rc; |
2317 | } | 2331 | } |
2318 | 2332 | ||
2319 | rc = register_cu3088_discipline(&lcs_group_driver); | 2333 | rc = register_cu3088_discipline(&lcs_group_driver); |
2320 | if (rc) { | 2334 | if (rc) { |
2321 | PRINT_ERR("Initialization failed\n"); | 2335 | pr_err("Initialization failed\n"); |
2322 | return rc; | 2336 | return rc; |
2323 | } | 2337 | } |
2324 | return 0; | 2338 | return 0; |
@@ -2331,7 +2345,7 @@ __init lcs_init_module(void) | |||
2331 | static void | 2345 | static void |
2332 | __exit lcs_cleanup_module(void) | 2346 | __exit lcs_cleanup_module(void) |
2333 | { | 2347 | { |
2334 | PRINT_INFO("Terminating lcs module.\n"); | 2348 | pr_info("Terminating lcs module.\n"); |
2335 | LCS_DBF_TEXT(0, trace, "cleanup"); | 2349 | LCS_DBF_TEXT(0, trace, "cleanup"); |
2336 | unregister_cu3088_discipline(&lcs_group_driver); | 2350 | unregister_cu3088_discipline(&lcs_group_driver); |
2337 | lcs_unregister_debug_facility(); | 2351 | lcs_unregister_debug_facility(); |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 0fea51e34b57..930e2fc2a011 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -31,6 +31,9 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define KMSG_COMPONENT "netiucv" | ||
35 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
36 | |||
34 | #undef DEBUG | 37 | #undef DEBUG |
35 | 38 | ||
36 | #include <linux/module.h> | 39 | #include <linux/module.h> |
@@ -846,7 +849,8 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) | |||
846 | 849 | ||
847 | fsm_deltimer(&conn->timer); | 850 | fsm_deltimer(&conn->timer); |
848 | iucv_path_sever(conn->path, NULL); | 851 | iucv_path_sever(conn->path, NULL); |
849 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); | 852 | dev_info(privptr->dev, "The peer interface of the IUCV device" |
853 | " has closed the connection\n"); | ||
850 | IUCV_DBF_TEXT(data, 2, | 854 | IUCV_DBF_TEXT(data, 2, |
851 | "conn_action_connsever: Remote dropped connection\n"); | 855 | "conn_action_connsever: Remote dropped connection\n"); |
852 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 856 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
@@ -856,13 +860,15 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) | |||
856 | static void conn_action_start(fsm_instance *fi, int event, void *arg) | 860 | static void conn_action_start(fsm_instance *fi, int event, void *arg) |
857 | { | 861 | { |
858 | struct iucv_connection *conn = arg; | 862 | struct iucv_connection *conn = arg; |
863 | struct net_device *netdev = conn->netdev; | ||
864 | struct netiucv_priv *privptr = netdev_priv(netdev); | ||
859 | int rc; | 865 | int rc; |
860 | 866 | ||
861 | IUCV_DBF_TEXT(trace, 3, __func__); | 867 | IUCV_DBF_TEXT(trace, 3, __func__); |
862 | 868 | ||
863 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 869 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
864 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", | 870 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", |
865 | conn->netdev->name, conn->userid); | 871 | netdev->name, conn->userid); |
866 | 872 | ||
867 | /* | 873 | /* |
868 | * We must set the state before calling iucv_connect because the | 874 | * We must set the state before calling iucv_connect because the |
@@ -876,41 +882,45 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) | |||
876 | NULL, iucvMagic, conn); | 882 | NULL, iucvMagic, conn); |
877 | switch (rc) { | 883 | switch (rc) { |
878 | case 0: | 884 | case 0: |
879 | conn->netdev->tx_queue_len = conn->path->msglim; | 885 | netdev->tx_queue_len = conn->path->msglim; |
880 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, | 886 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, |
881 | CONN_EVENT_TIMER, conn); | 887 | CONN_EVENT_TIMER, conn); |
882 | return; | 888 | return; |
883 | case 11: | 889 | case 11: |
884 | PRINT_INFO("%s: User %s is currently not available.\n", | 890 | dev_warn(privptr->dev, |
885 | conn->netdev->name, | 891 | "The IUCV device failed to connect to z/VM guest %s\n", |
886 | netiucv_printname(conn->userid)); | 892 | netiucv_printname(conn->userid)); |
887 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 893 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
888 | break; | 894 | break; |
889 | case 12: | 895 | case 12: |
890 | PRINT_INFO("%s: User %s is currently not ready.\n", | 896 | dev_warn(privptr->dev, |
891 | conn->netdev->name, | 897 | "The IUCV device failed to connect to the peer on z/VM" |
892 | netiucv_printname(conn->userid)); | 898 | " guest %s\n", netiucv_printname(conn->userid)); |
893 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 899 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
894 | break; | 900 | break; |
895 | case 13: | 901 | case 13: |
896 | PRINT_WARN("%s: Too many IUCV connections.\n", | 902 | dev_err(privptr->dev, |
897 | conn->netdev->name); | 903 | "Connecting the IUCV device would exceed the maximum" |
904 | " number of IUCV connections\n"); | ||
898 | fsm_newstate(fi, CONN_STATE_CONNERR); | 905 | fsm_newstate(fi, CONN_STATE_CONNERR); |
899 | break; | 906 | break; |
900 | case 14: | 907 | case 14: |
901 | PRINT_WARN("%s: User %s has too many IUCV connections.\n", | 908 | dev_err(privptr->dev, |
902 | conn->netdev->name, | 909 | "z/VM guest %s has too many IUCV connections" |
903 | netiucv_printname(conn->userid)); | 910 | " to connect with the IUCV device\n", |
911 | netiucv_printname(conn->userid)); | ||
904 | fsm_newstate(fi, CONN_STATE_CONNERR); | 912 | fsm_newstate(fi, CONN_STATE_CONNERR); |
905 | break; | 913 | break; |
906 | case 15: | 914 | case 15: |
907 | PRINT_WARN("%s: No IUCV authorization in CP directory.\n", | 915 | dev_err(privptr->dev, |
908 | conn->netdev->name); | 916 | "The IUCV device cannot connect to a z/VM guest with no" |
917 | " IUCV authorization\n"); | ||
909 | fsm_newstate(fi, CONN_STATE_CONNERR); | 918 | fsm_newstate(fi, CONN_STATE_CONNERR); |
910 | break; | 919 | break; |
911 | default: | 920 | default: |
912 | PRINT_WARN("%s: iucv_connect returned error %d\n", | 921 | dev_err(privptr->dev, |
913 | conn->netdev->name, rc); | 922 | "Connecting the IUCV device failed with error %d\n", |
923 | rc); | ||
914 | fsm_newstate(fi, CONN_STATE_CONNERR); | 924 | fsm_newstate(fi, CONN_STATE_CONNERR); |
915 | break; | 925 | break; |
916 | } | 926 | } |
@@ -1059,8 +1069,9 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) | |||
1059 | switch (fsm_getstate(fi)) { | 1069 | switch (fsm_getstate(fi)) { |
1060 | case DEV_STATE_STARTWAIT: | 1070 | case DEV_STATE_STARTWAIT: |
1061 | fsm_newstate(fi, DEV_STATE_RUNNING); | 1071 | fsm_newstate(fi, DEV_STATE_RUNNING); |
1062 | PRINT_INFO("%s: connected with remote side %s\n", | 1072 | dev_info(privptr->dev, |
1063 | dev->name, privptr->conn->userid); | 1073 | "The IUCV device has been connected" |
1074 | " successfully to %s\n", privptr->conn->userid); | ||
1064 | IUCV_DBF_TEXT(setup, 3, | 1075 | IUCV_DBF_TEXT(setup, 3, |
1065 | "connection is up and running\n"); | 1076 | "connection is up and running\n"); |
1066 | break; | 1077 | break; |
@@ -1982,6 +1993,8 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1982 | if (rc) | 1993 | if (rc) |
1983 | goto out_unreg; | 1994 | goto out_unreg; |
1984 | 1995 | ||
1996 | dev_info(priv->dev, "The IUCV interface to %s has been" | ||
1997 | " established successfully\n", netiucv_printname(username)); | ||
1985 | 1998 | ||
1986 | return count; | 1999 | return count; |
1987 | 2000 | ||
@@ -2027,10 +2040,9 @@ static ssize_t remove_write (struct device_driver *drv, | |||
2027 | continue; | 2040 | continue; |
2028 | read_unlock_bh(&iucv_connection_rwlock); | 2041 | read_unlock_bh(&iucv_connection_rwlock); |
2029 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 2042 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
2030 | PRINT_WARN("netiucv: net device %s active with peer " | 2043 | dev_warn(dev, "The IUCV device is connected" |
2031 | "%s\n", ndev->name, priv->conn->userid); | 2044 | " to %s and cannot be removed\n", |
2032 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2045 | priv->conn->userid); |
2033 | ndev->name); | ||
2034 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2046 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
2035 | return -EPERM; | 2047 | return -EPERM; |
2036 | } | 2048 | } |
@@ -2062,7 +2074,7 @@ static struct attribute_group *netiucv_drv_attr_groups[] = { | |||
2062 | 2074 | ||
2063 | static void netiucv_banner(void) | 2075 | static void netiucv_banner(void) |
2064 | { | 2076 | { |
2065 | PRINT_INFO("NETIUCV driver initialized\n"); | 2077 | pr_info("driver initialized\n"); |
2066 | } | 2078 | } |
2067 | 2079 | ||
2068 | static void __exit netiucv_exit(void) | 2080 | static void __exit netiucv_exit(void) |
@@ -2088,7 +2100,7 @@ static void __exit netiucv_exit(void) | |||
2088 | iucv_unregister(&netiucv_handler, 1); | 2100 | iucv_unregister(&netiucv_handler, 1); |
2089 | iucv_unregister_dbf_views(); | 2101 | iucv_unregister_dbf_views(); |
2090 | 2102 | ||
2091 | PRINT_INFO("NETIUCV driver unloaded\n"); | 2103 | pr_info("driver unloaded\n"); |
2092 | return; | 2104 | return; |
2093 | } | 2105 | } |
2094 | 2106 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index af6d60458513..d5ccce1643e4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -31,11 +31,10 @@ | |||
31 | #include <asm/qdio.h> | 31 | #include <asm/qdio.h> |
32 | #include <asm/ccwdev.h> | 32 | #include <asm/ccwdev.h> |
33 | #include <asm/ccwgroup.h> | 33 | #include <asm/ccwgroup.h> |
34 | #include <asm/sysinfo.h> | ||
34 | 35 | ||
35 | #include "qeth_core_mpc.h" | 36 | #include "qeth_core_mpc.h" |
36 | 37 | ||
37 | #define KMSG_COMPONENT "qeth" | ||
38 | |||
39 | /** | 38 | /** |
40 | * Debug Facility stuff | 39 | * Debug Facility stuff |
41 | */ | 40 | */ |
@@ -74,11 +73,6 @@ struct qeth_dbf_info { | |||
74 | #define QETH_DBF_TEXT_(name, level, text...) \ | 73 | #define QETH_DBF_TEXT_(name, level, text...) \ |
75 | qeth_dbf_longtext(QETH_DBF_##name, level, text) | 74 | qeth_dbf_longtext(QETH_DBF_##name, level, text) |
76 | 75 | ||
77 | /** | ||
78 | * some more debug stuff | ||
79 | */ | ||
80 | #define PRINTK_HEADER "qeth: " | ||
81 | |||
82 | #define SENSE_COMMAND_REJECT_BYTE 0 | 76 | #define SENSE_COMMAND_REJECT_BYTE 0 |
83 | #define SENSE_COMMAND_REJECT_FLAG 0x80 | 77 | #define SENSE_COMMAND_REJECT_FLAG 0x80 |
84 | #define SENSE_RESETTING_EVENT_BYTE 1 | 78 | #define SENSE_RESETTING_EVENT_BYTE 1 |
@@ -733,6 +727,7 @@ struct qeth_card { | |||
733 | struct qeth_osn_info osn_info; | 727 | struct qeth_osn_info osn_info; |
734 | struct qeth_discipline discipline; | 728 | struct qeth_discipline discipline; |
735 | atomic_t force_alloc_skb; | 729 | atomic_t force_alloc_skb; |
730 | struct service_level qeth_service_level; | ||
736 | }; | 731 | }; |
737 | 732 | ||
738 | struct qeth_card_list_struct { | 733 | struct qeth_card_list_struct { |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 52d26592c72c..e783644a2105 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -319,7 +322,10 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
319 | return -EIO; | 322 | return -EIO; |
320 | iob = qeth_get_buffer(&card->read); | 323 | iob = qeth_get_buffer(&card->read); |
321 | if (!iob) { | 324 | if (!iob) { |
322 | PRINT_WARN("issue_next_read failed: no iob available!\n"); | 325 | dev_warn(&card->gdev->dev, "The qeth device driver " |
326 | "failed to recover an error on the device\n"); | ||
327 | QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " | ||
328 | "available\n", dev_name(&card->gdev->dev)); | ||
323 | return -ENOMEM; | 329 | return -ENOMEM; |
324 | } | 330 | } |
325 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); | 331 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); |
@@ -327,7 +333,8 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
327 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, | 333 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, |
328 | (addr_t) iob, 0, 0); | 334 | (addr_t) iob, 0, 0); |
329 | if (rc) { | 335 | if (rc) { |
330 | PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc); | 336 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " |
337 | "rc=%i\n", dev_name(&card->gdev->dev), rc); | ||
331 | atomic_set(&card->read.irq_pending, 0); | 338 | atomic_set(&card->read.irq_pending, 0); |
332 | qeth_schedule_recovery(card); | 339 | qeth_schedule_recovery(card); |
333 | wake_up(&card->wait_q); | 340 | wake_up(&card->wait_q); |
@@ -393,10 +400,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
393 | } else { | 400 | } else { |
394 | switch (cmd->hdr.command) { | 401 | switch (cmd->hdr.command) { |
395 | case IPA_CMD_STOPLAN: | 402 | case IPA_CMD_STOPLAN: |
396 | PRINT_WARN("Link failure on %s (CHPID 0x%X) - " | 403 | dev_warn(&card->gdev->dev, |
397 | "there is a network problem or " | 404 | "The link for interface %s on CHPID" |
398 | "someone pulled the cable or " | 405 | " 0x%X failed\n", |
399 | "disabled the port.\n", | ||
400 | QETH_CARD_IFNAME(card), | 406 | QETH_CARD_IFNAME(card), |
401 | card->info.chpid); | 407 | card->info.chpid); |
402 | card->lan_online = 0; | 408 | card->lan_online = 0; |
@@ -404,9 +410,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
404 | netif_carrier_off(card->dev); | 410 | netif_carrier_off(card->dev); |
405 | return NULL; | 411 | return NULL; |
406 | case IPA_CMD_STARTLAN: | 412 | case IPA_CMD_STARTLAN: |
407 | PRINT_INFO("Link reestablished on %s " | 413 | dev_info(&card->gdev->dev, |
408 | "(CHPID 0x%X). Scheduling " | 414 | "The link for %s on CHPID 0x%X has" |
409 | "IP address reset.\n", | 415 | " been restored\n", |
410 | QETH_CARD_IFNAME(card), | 416 | QETH_CARD_IFNAME(card), |
411 | card->info.chpid); | 417 | card->info.chpid); |
412 | netif_carrier_on(card->dev); | 418 | netif_carrier_on(card->dev); |
@@ -458,7 +464,7 @@ static int qeth_check_idx_response(unsigned char *buffer) | |||
458 | 464 | ||
459 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); | 465 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); |
460 | if ((buffer[2] & 0xc0) == 0xc0) { | 466 | if ((buffer[2] & 0xc0) == 0xc0) { |
461 | PRINT_WARN("received an IDX TERMINATE " | 467 | QETH_DBF_MESSAGE(2, "received an IDX TERMINATE " |
462 | "with cause code 0x%02x%s\n", | 468 | "with cause code 0x%02x%s\n", |
463 | buffer[4], | 469 | buffer[4], |
464 | ((buffer[4] == 0x22) ? | 470 | ((buffer[4] == 0x22) ? |
@@ -744,8 +750,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
744 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 750 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
745 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { | 751 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { |
746 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); | 752 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); |
747 | PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", | 753 | dev_warn(&cdev->dev, "The qeth device driver " |
748 | dev_name(&cdev->dev), dstat, cstat); | 754 | "failed to recover an error on the device\n"); |
755 | QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ", | ||
756 | dev_name(&cdev->dev), dstat, cstat); | ||
749 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, | 757 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, |
750 | 16, 1, irb, 64, 1); | 758 | 16, 1, irb, 64, 1); |
751 | return 1; | 759 | return 1; |
@@ -784,12 +792,14 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
784 | 792 | ||
785 | switch (PTR_ERR(irb)) { | 793 | switch (PTR_ERR(irb)) { |
786 | case -EIO: | 794 | case -EIO: |
787 | PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); | 795 | QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", |
796 | dev_name(&cdev->dev)); | ||
788 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 797 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
789 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); | 798 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); |
790 | break; | 799 | break; |
791 | case -ETIMEDOUT: | 800 | case -ETIMEDOUT: |
792 | PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); | 801 | dev_warn(&cdev->dev, "A hardware operation timed out" |
802 | " on the device\n"); | ||
793 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 803 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
794 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); | 804 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); |
795 | if (intparm == QETH_RCD_PARM) { | 805 | if (intparm == QETH_RCD_PARM) { |
@@ -802,8 +812,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
802 | } | 812 | } |
803 | break; | 813 | break; |
804 | default: | 814 | default: |
805 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 815 | QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", |
806 | dev_name(&cdev->dev)); | 816 | dev_name(&cdev->dev), PTR_ERR(irb)); |
807 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 817 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
808 | QETH_DBF_TEXT(TRACE, 2, " rc???"); | 818 | QETH_DBF_TEXT(TRACE, 2, " rc???"); |
809 | } | 819 | } |
@@ -869,10 +879,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
869 | (dstat & DEV_STAT_UNIT_CHECK) || | 879 | (dstat & DEV_STAT_UNIT_CHECK) || |
870 | (cstat)) { | 880 | (cstat)) { |
871 | if (irb->esw.esw0.erw.cons) { | 881 | if (irb->esw.esw0.erw.cons) { |
872 | /* TODO: we should make this s390dbf */ | 882 | dev_warn(&channel->ccwdev->dev, |
873 | PRINT_WARN("sense data available on channel %s.\n", | 883 | "The qeth device driver failed to recover " |
874 | CHANNEL_ID(channel)); | 884 | "an error on the device\n"); |
875 | PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat); | 885 | QETH_DBF_MESSAGE(2, "%s sense data available. cstat " |
886 | "0x%X dstat 0x%X\n", | ||
887 | dev_name(&channel->ccwdev->dev), cstat, dstat); | ||
876 | print_hex_dump(KERN_WARNING, "qeth: irb ", | 888 | print_hex_dump(KERN_WARNING, "qeth: irb ", |
877 | DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); | 889 | DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); |
878 | print_hex_dump(KERN_WARNING, "qeth: sense data ", | 890 | print_hex_dump(KERN_WARNING, "qeth: sense data ", |
@@ -1138,6 +1150,14 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1138 | return 0; | 1150 | return 0; |
1139 | } | 1151 | } |
1140 | 1152 | ||
1153 | static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) | ||
1154 | { | ||
1155 | struct qeth_card *card = container_of(slr, struct qeth_card, | ||
1156 | qeth_service_level); | ||
1157 | seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card), | ||
1158 | card->info.mcl_level); | ||
1159 | } | ||
1160 | |||
1141 | static struct qeth_card *qeth_alloc_card(void) | 1161 | static struct qeth_card *qeth_alloc_card(void) |
1142 | { | 1162 | { |
1143 | struct qeth_card *card; | 1163 | struct qeth_card *card; |
@@ -1157,6 +1177,8 @@ static struct qeth_card *qeth_alloc_card(void) | |||
1157 | return NULL; | 1177 | return NULL; |
1158 | } | 1178 | } |
1159 | card->options.layer2 = -1; | 1179 | card->options.layer2 = -1; |
1180 | card->qeth_service_level.seq_print = qeth_core_sl_print; | ||
1181 | register_service_level(&card->qeth_service_level); | ||
1160 | return card; | 1182 | return card; |
1161 | } | 1183 | } |
1162 | 1184 | ||
@@ -1175,8 +1197,8 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1175 | card->qdio.no_out_queues = known_devices[i][8]; | 1197 | card->qdio.no_out_queues = known_devices[i][8]; |
1176 | card->info.is_multicast_different = known_devices[i][9]; | 1198 | card->info.is_multicast_different = known_devices[i][9]; |
1177 | if (qeth_is_1920_device(card)) { | 1199 | if (qeth_is_1920_device(card)) { |
1178 | PRINT_INFO("Priority Queueing not able " | 1200 | dev_info(&card->gdev->dev, |
1179 | "due to hardware limitations!\n"); | 1201 | "Priority Queueing not supported\n"); |
1180 | card->qdio.no_out_queues = 1; | 1202 | card->qdio.no_out_queues = 1; |
1181 | card->qdio.default_out_queue = 0; | 1203 | card->qdio.default_out_queue = 0; |
1182 | } | 1204 | } |
@@ -1185,7 +1207,8 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1185 | i++; | 1207 | i++; |
1186 | } | 1208 | } |
1187 | card->info.type = QETH_CARD_TYPE_UNKNOWN; | 1209 | card->info.type = QETH_CARD_TYPE_UNKNOWN; |
1188 | PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card)); | 1210 | dev_err(&card->gdev->dev, "The adapter hardware is of an " |
1211 | "unknown type\n"); | ||
1189 | return -ENOENT; | 1212 | return -ENOENT; |
1190 | } | 1213 | } |
1191 | 1214 | ||
@@ -1368,8 +1391,8 @@ static int qeth_get_unitaddr(struct qeth_card *card) | |||
1368 | QETH_DBF_TEXT(SETUP, 2, "getunit"); | 1391 | QETH_DBF_TEXT(SETUP, 2, "getunit"); |
1369 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); | 1392 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); |
1370 | if (rc) { | 1393 | if (rc) { |
1371 | PRINT_ERR("qeth_read_conf_data for device %s returned %i\n", | 1394 | QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", |
1372 | CARD_DDEV_ID(card), rc); | 1395 | dev_name(&card->gdev->dev), rc); |
1373 | return rc; | 1396 | return rc; |
1374 | } | 1397 | } |
1375 | card->info.chpid = prcd[30]; | 1398 | card->info.chpid = prcd[30]; |
@@ -1519,7 +1542,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1519 | if (rc == -ERESTARTSYS) | 1542 | if (rc == -ERESTARTSYS) |
1520 | return rc; | 1543 | return rc; |
1521 | if (channel->state != CH_STATE_ACTIVATING) { | 1544 | if (channel->state != CH_STATE_ACTIVATING) { |
1522 | PRINT_WARN("IDX activate timed out!\n"); | 1545 | dev_warn(&channel->ccwdev->dev, "The qeth device driver" |
1546 | " failed to recover an error on the device\n"); | ||
1547 | QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", | ||
1548 | dev_name(&channel->ccwdev->dev)); | ||
1523 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); | 1549 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); |
1524 | qeth_clear_cmd_buffers(channel); | 1550 | qeth_clear_cmd_buffers(channel); |
1525 | return -ETIME; | 1551 | return -ETIME; |
@@ -1552,20 +1578,21 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, | |||
1552 | 1578 | ||
1553 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { | 1579 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { |
1554 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) | 1580 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) |
1555 | PRINT_ERR("IDX_ACTIVATE on write channel device %s: " | 1581 | dev_err(&card->write.ccwdev->dev, |
1556 | "adapter exclusively used by another host\n", | 1582 | "The adapter is used exclusively by another " |
1557 | CARD_WDEV_ID(card)); | 1583 | "host\n"); |
1558 | else | 1584 | else |
1559 | PRINT_ERR("IDX_ACTIVATE on write channel device %s: " | 1585 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" |
1560 | "negative reply\n", CARD_WDEV_ID(card)); | 1586 | " negative reply\n", |
1587 | dev_name(&card->write.ccwdev->dev)); | ||
1561 | goto out; | 1588 | goto out; |
1562 | } | 1589 | } |
1563 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); | 1590 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); |
1564 | if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { | 1591 | if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { |
1565 | PRINT_WARN("IDX_ACTIVATE on write channel device %s: " | 1592 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " |
1566 | "function level mismatch " | 1593 | "function level mismatch (sent: 0x%x, received: " |
1567 | "(sent: 0x%x, received: 0x%x)\n", | 1594 | "0x%x)\n", dev_name(&card->write.ccwdev->dev), |
1568 | CARD_WDEV_ID(card), card->info.func_level, temp); | 1595 | card->info.func_level, temp); |
1569 | goto out; | 1596 | goto out; |
1570 | } | 1597 | } |
1571 | channel->state = CH_STATE_UP; | 1598 | channel->state = CH_STATE_UP; |
@@ -1591,12 +1618,13 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1591 | 1618 | ||
1592 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { | 1619 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { |
1593 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) | 1620 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) |
1594 | PRINT_ERR("IDX_ACTIVATE on read channel device %s: " | 1621 | dev_err(&card->write.ccwdev->dev, |
1595 | "adapter exclusively used by another host\n", | 1622 | "The adapter is used exclusively by another " |
1596 | CARD_RDEV_ID(card)); | 1623 | "host\n"); |
1597 | else | 1624 | else |
1598 | PRINT_ERR("IDX_ACTIVATE on read channel device %s: " | 1625 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" |
1599 | "negative reply\n", CARD_RDEV_ID(card)); | 1626 | " negative reply\n", |
1627 | dev_name(&card->read.ccwdev->dev)); | ||
1600 | goto out; | 1628 | goto out; |
1601 | } | 1629 | } |
1602 | 1630 | ||
@@ -1610,9 +1638,10 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1610 | 1638 | ||
1611 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); | 1639 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); |
1612 | if (temp != qeth_peer_func_level(card->info.func_level)) { | 1640 | if (temp != qeth_peer_func_level(card->info.func_level)) { |
1613 | PRINT_WARN("IDX_ACTIVATE on read channel device %s: function " | 1641 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " |
1614 | "level mismatch (sent: 0x%x, received: 0x%x)\n", | 1642 | "level mismatch (sent: 0x%x, received: 0x%x)\n", |
1615 | CARD_RDEV_ID(card), card->info.func_level, temp); | 1643 | dev_name(&card->read.ccwdev->dev), |
1644 | card->info.func_level, temp); | ||
1616 | goto out; | 1645 | goto out; |
1617 | } | 1646 | } |
1618 | memcpy(&card->token.issuer_rm_r, | 1647 | memcpy(&card->token.issuer_rm_r, |
@@ -1686,8 +1715,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1686 | (addr_t) iob, 0, 0); | 1715 | (addr_t) iob, 0, 0); |
1687 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); | 1716 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); |
1688 | if (rc) { | 1717 | if (rc) { |
1689 | PRINT_WARN("qeth_send_control_data: " | 1718 | QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " |
1690 | "ccw_device_start rc = %i\n", rc); | 1719 | "ccw_device_start rc = %i\n", |
1720 | dev_name(&card->write.ccwdev->dev), rc); | ||
1691 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1721 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1692 | spin_lock_irqsave(&card->lock, flags); | 1722 | spin_lock_irqsave(&card->lock, flags); |
1693 | list_del_init(&reply->list); | 1723 | list_del_init(&reply->list); |
@@ -2170,11 +2200,8 @@ static void qeth_print_status_with_portname(struct qeth_card *card) | |||
2170 | dbf_text[i] = | 2200 | dbf_text[i] = |
2171 | (char) _ebcasc[(__u8) dbf_text[i]]; | 2201 | (char) _ebcasc[(__u8) dbf_text[i]]; |
2172 | dbf_text[8] = 0; | 2202 | dbf_text[8] = 0; |
2173 | PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n" | 2203 | dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" |
2174 | "with link type %s (portname: %s)\n", | 2204 | "with link type %s (portname: %s)\n", |
2175 | CARD_RDEV_ID(card), | ||
2176 | CARD_WDEV_ID(card), | ||
2177 | CARD_DDEV_ID(card), | ||
2178 | qeth_get_cardname(card), | 2205 | qeth_get_cardname(card), |
2179 | (card->info.mcl_level[0]) ? " (level: " : "", | 2206 | (card->info.mcl_level[0]) ? " (level: " : "", |
2180 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2207 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
@@ -2187,23 +2214,17 @@ static void qeth_print_status_with_portname(struct qeth_card *card) | |||
2187 | static void qeth_print_status_no_portname(struct qeth_card *card) | 2214 | static void qeth_print_status_no_portname(struct qeth_card *card) |
2188 | { | 2215 | { |
2189 | if (card->info.portname[0]) | 2216 | if (card->info.portname[0]) |
2190 | PRINT_INFO("Device %s/%s/%s is a%s " | 2217 | dev_info(&card->gdev->dev, "Device is a%s " |
2191 | "card%s%s%s\nwith link type %s " | 2218 | "card%s%s%s\nwith link type %s " |
2192 | "(no portname needed by interface).\n", | 2219 | "(no portname needed by interface).\n", |
2193 | CARD_RDEV_ID(card), | ||
2194 | CARD_WDEV_ID(card), | ||
2195 | CARD_DDEV_ID(card), | ||
2196 | qeth_get_cardname(card), | 2220 | qeth_get_cardname(card), |
2197 | (card->info.mcl_level[0]) ? " (level: " : "", | 2221 | (card->info.mcl_level[0]) ? " (level: " : "", |
2198 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2222 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
2199 | (card->info.mcl_level[0]) ? ")" : "", | 2223 | (card->info.mcl_level[0]) ? ")" : "", |
2200 | qeth_get_cardname_short(card)); | 2224 | qeth_get_cardname_short(card)); |
2201 | else | 2225 | else |
2202 | PRINT_INFO("Device %s/%s/%s is a%s " | 2226 | dev_info(&card->gdev->dev, "Device is a%s " |
2203 | "card%s%s%s\nwith link type %s.\n", | 2227 | "card%s%s%s\nwith link type %s.\n", |
2204 | CARD_RDEV_ID(card), | ||
2205 | CARD_WDEV_ID(card), | ||
2206 | CARD_DDEV_ID(card), | ||
2207 | qeth_get_cardname(card), | 2228 | qeth_get_cardname(card), |
2208 | (card->info.mcl_level[0]) ? " (level: " : "", | 2229 | (card->info.mcl_level[0]) ? " (level: " : "", |
2209 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2230 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
@@ -2325,7 +2346,6 @@ static int qeth_init_input_buffer(struct qeth_card *card, | |||
2325 | * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off | 2346 | * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off |
2326 | * buffers | 2347 | * buffers |
2327 | */ | 2348 | */ |
2328 | BUG_ON(!pool_entry); | ||
2329 | 2349 | ||
2330 | buf->pool_entry = pool_entry; | 2350 | buf->pool_entry = pool_entry; |
2331 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { | 2351 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { |
@@ -2630,9 +2650,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2630 | qeth_get_micros() - | 2650 | qeth_get_micros() - |
2631 | card->perf_stats.inbound_do_qdio_start_time; | 2651 | card->perf_stats.inbound_do_qdio_start_time; |
2632 | if (rc) { | 2652 | if (rc) { |
2633 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " | 2653 | dev_warn(&card->gdev->dev, |
2634 | "return %i (device %s).\n", | 2654 | "QDIO reported an error, rc=%i\n", rc); |
2635 | rc, CARD_DDEV_ID(card)); | ||
2636 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); | 2655 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); |
2637 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | 2656 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
2638 | } | 2657 | } |
@@ -3730,6 +3749,7 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
3730 | free_netdev(card->dev); | 3749 | free_netdev(card->dev); |
3731 | kfree(card->ip_tbd_list); | 3750 | kfree(card->ip_tbd_list); |
3732 | qeth_free_qdio_buffers(card); | 3751 | qeth_free_qdio_buffers(card); |
3752 | unregister_service_level(&card->qeth_service_level); | ||
3733 | kfree(card); | 3753 | kfree(card); |
3734 | } | 3754 | } |
3735 | 3755 | ||
@@ -3757,7 +3777,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, | |||
3757 | 3777 | ||
3758 | int qeth_core_hardsetup_card(struct qeth_card *card) | 3778 | int qeth_core_hardsetup_card(struct qeth_card *card) |
3759 | { | 3779 | { |
3760 | struct qdio_ssqd_desc *qdio_ssqd; | 3780 | struct qdio_ssqd_desc *ssqd; |
3761 | int retries = 3; | 3781 | int retries = 3; |
3762 | int mpno = 0; | 3782 | int mpno = 0; |
3763 | int rc; | 3783 | int rc; |
@@ -3766,7 +3786,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card) | |||
3766 | atomic_set(&card->force_alloc_skb, 0); | 3786 | atomic_set(&card->force_alloc_skb, 0); |
3767 | retry: | 3787 | retry: |
3768 | if (retries < 3) { | 3788 | if (retries < 3) { |
3769 | PRINT_WARN("Retrying to do IDX activates.\n"); | 3789 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", |
3790 | dev_name(&card->gdev->dev)); | ||
3770 | ccw_device_set_offline(CARD_DDEV(card)); | 3791 | ccw_device_set_offline(CARD_DDEV(card)); |
3771 | ccw_device_set_offline(CARD_WDEV(card)); | 3792 | ccw_device_set_offline(CARD_WDEV(card)); |
3772 | ccw_device_set_offline(CARD_RDEV(card)); | 3793 | ccw_device_set_offline(CARD_RDEV(card)); |
@@ -3792,9 +3813,16 @@ retry: | |||
3792 | return rc; | 3813 | return rc; |
3793 | } | 3814 | } |
3794 | 3815 | ||
3795 | qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card)); | 3816 | ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL); |
3796 | if (qdio_ssqd) | 3817 | if (!ssqd) { |
3797 | mpno = qdio_ssqd->pcnt; | 3818 | rc = -ENOMEM; |
3819 | goto out; | ||
3820 | } | ||
3821 | rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd); | ||
3822 | if (rc == 0) | ||
3823 | mpno = ssqd->pcnt; | ||
3824 | kfree(ssqd); | ||
3825 | |||
3798 | if (mpno) | 3826 | if (mpno) |
3799 | mpno = min(mpno - 1, QETH_MAX_PORTNO); | 3827 | mpno = min(mpno - 1, QETH_MAX_PORTNO); |
3800 | if (card->info.portno > mpno) { | 3828 | if (card->info.portno > mpno) { |
@@ -3834,7 +3862,10 @@ retry: | |||
3834 | } | 3862 | } |
3835 | return 0; | 3863 | return 0; |
3836 | out: | 3864 | out: |
3837 | PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc); | 3865 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " |
3866 | "an error on the device\n"); | ||
3867 | QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n", | ||
3868 | dev_name(&card->gdev->dev), rc); | ||
3838 | return rc; | 3869 | return rc; |
3839 | } | 3870 | } |
3840 | EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); | 3871 | EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); |
@@ -4054,8 +4085,8 @@ int qeth_core_load_discipline(struct qeth_card *card, | |||
4054 | break; | 4085 | break; |
4055 | } | 4086 | } |
4056 | if (!card->discipline.ccwgdriver) { | 4087 | if (!card->discipline.ccwgdriver) { |
4057 | PRINT_ERR("Support for discipline %d not present\n", | 4088 | dev_err(&card->gdev->dev, "There is no kernel module to " |
4058 | discipline); | 4089 | "support discipline %d\n", discipline); |
4059 | rc = -EINVAL; | 4090 | rc = -EINVAL; |
4060 | } | 4091 | } |
4061 | return rc; | 4092 | return rc; |
@@ -4448,7 +4479,7 @@ static int __init qeth_core_init(void) | |||
4448 | { | 4479 | { |
4449 | int rc; | 4480 | int rc; |
4450 | 4481 | ||
4451 | PRINT_INFO("loading core functions\n"); | 4482 | pr_info("loading core functions\n"); |
4452 | INIT_LIST_HEAD(&qeth_core_card_list.list); | 4483 | INIT_LIST_HEAD(&qeth_core_card_list.list); |
4453 | rwlock_init(&qeth_core_card_list.rwlock); | 4484 | rwlock_init(&qeth_core_card_list.rwlock); |
4454 | 4485 | ||
@@ -4488,9 +4519,10 @@ driver_err: | |||
4488 | ccwgroup_err: | 4519 | ccwgroup_err: |
4489 | ccw_driver_unregister(&qeth_ccw_driver); | 4520 | ccw_driver_unregister(&qeth_ccw_driver); |
4490 | ccw_err: | 4521 | ccw_err: |
4522 | QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc); | ||
4491 | qeth_unregister_dbf_views(); | 4523 | qeth_unregister_dbf_views(); |
4492 | out_err: | 4524 | out_err: |
4493 | PRINT_ERR("Initialization failed with code %d\n", rc); | 4525 | pr_err("Initializing the qeth device driver failed\n"); |
4494 | return rc; | 4526 | return rc; |
4495 | } | 4527 | } |
4496 | 4528 | ||
@@ -4503,7 +4535,7 @@ static void __exit qeth_core_exit(void) | |||
4503 | ccw_driver_unregister(&qeth_ccw_driver); | 4535 | ccw_driver_unregister(&qeth_ccw_driver); |
4504 | kmem_cache_destroy(qeth_core_header_cache); | 4536 | kmem_cache_destroy(qeth_core_header_cache); |
4505 | qeth_unregister_dbf_views(); | 4537 | qeth_unregister_dbf_views(); |
4506 | PRINT_INFO("core functions removed\n"); | 4538 | pr_info("core functions removed\n"); |
4507 | } | 4539 | } |
4508 | 4540 | ||
4509 | module_init(qeth_core_init); | 4541 | module_init(qeth_core_init); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8a8fad7a8bea..2c48591ced44 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -497,12 +500,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
497 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | 500 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; |
498 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, | 501 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, |
499 | OSA_ADDR_LEN); | 502 | OSA_ADDR_LEN); |
500 | PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " | 503 | dev_info(&card->gdev->dev, |
501 | "successfully registered on device %s\n", | 504 | "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " |
502 | card->dev->dev_addr[0], card->dev->dev_addr[1], | 505 | "successfully registered on device %s\n", |
503 | card->dev->dev_addr[2], card->dev->dev_addr[3], | 506 | card->dev->dev_addr[0], card->dev->dev_addr[1], |
504 | card->dev->dev_addr[4], card->dev->dev_addr[5], | 507 | card->dev->dev_addr[2], card->dev->dev_addr[3], |
505 | card->dev->name); | 508 | card->dev->dev_addr[4], card->dev->dev_addr[5], |
509 | card->dev->name); | ||
506 | } | 510 | } |
507 | return 0; | 511 | return 0; |
508 | } | 512 | } |
@@ -1009,9 +1013,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1009 | if (rc) { | 1013 | if (rc) { |
1010 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1014 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1011 | if (rc == 0xe080) { | 1015 | if (rc == 0xe080) { |
1012 | PRINT_WARN("LAN on card %s if offline! " | 1016 | dev_warn(&card->gdev->dev, |
1013 | "Waiting for STARTLAN from card.\n", | 1017 | "The LAN is offline\n"); |
1014 | CARD_BUS_ID(card)); | ||
1015 | card->lan_online = 0; | 1018 | card->lan_online = 0; |
1016 | } | 1019 | } |
1017 | return rc; | 1020 | return rc; |
@@ -1111,8 +1114,8 @@ static int qeth_l2_recover(void *ptr) | |||
1111 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 1114 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
1112 | return 0; | 1115 | return 0; |
1113 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 1116 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
1114 | PRINT_WARN("Recovery of device %s started ...\n", | 1117 | dev_warn(&card->gdev->dev, |
1115 | CARD_BUS_ID(card)); | 1118 | "A recovery process has been started for the device\n"); |
1116 | card->use_hard_stop = 1; | 1119 | card->use_hard_stop = 1; |
1117 | __qeth_l2_set_offline(card->gdev, 1); | 1120 | __qeth_l2_set_offline(card->gdev, 1); |
1118 | rc = __qeth_l2_set_online(card->gdev, 1); | 1121 | rc = __qeth_l2_set_online(card->gdev, 1); |
@@ -1120,27 +1123,27 @@ static int qeth_l2_recover(void *ptr) | |||
1120 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1123 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1121 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1124 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1122 | if (!rc) | 1125 | if (!rc) |
1123 | PRINT_INFO("Device %s successfully recovered!\n", | 1126 | dev_info(&card->gdev->dev, |
1124 | CARD_BUS_ID(card)); | 1127 | "Device successfully recovered!\n"); |
1125 | else { | 1128 | else { |
1126 | rtnl_lock(); | 1129 | rtnl_lock(); |
1127 | dev_close(card->dev); | 1130 | dev_close(card->dev); |
1128 | rtnl_unlock(); | 1131 | rtnl_unlock(); |
1129 | PRINT_INFO("Device %s could not be recovered!\n", | 1132 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1130 | CARD_BUS_ID(card)); | 1133 | "failed to recover an error on the device\n"); |
1131 | } | 1134 | } |
1132 | return 0; | 1135 | return 0; |
1133 | } | 1136 | } |
1134 | 1137 | ||
1135 | static int __init qeth_l2_init(void) | 1138 | static int __init qeth_l2_init(void) |
1136 | { | 1139 | { |
1137 | PRINT_INFO("register layer 2 discipline\n"); | 1140 | pr_info("register layer 2 discipline\n"); |
1138 | return 0; | 1141 | return 0; |
1139 | } | 1142 | } |
1140 | 1143 | ||
1141 | static void __exit qeth_l2_exit(void) | 1144 | static void __exit qeth_l2_exit(void) |
1142 | { | 1145 | { |
1143 | PRINT_INFO("unregister layer 2 discipline\n"); | 1146 | pr_info("unregister layer 2 discipline\n"); |
1144 | } | 1147 | } |
1145 | 1148 | ||
1146 | static void qeth_l2_shutdown(struct ccwgroup_device *gdev) | 1149 | static void qeth_l2_shutdown(struct ccwgroup_device *gdev) |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ed59fedd5922..c0b30b25a5f1 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -917,8 +920,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
917 | if (rc) { | 920 | if (rc) { |
918 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); | 921 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); |
919 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | 922 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); |
920 | PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n", | 923 | dev_warn(&card->gdev->dev, |
921 | buf, rc, rc); | 924 | "Registering IP address %s failed\n", buf); |
922 | } | 925 | } |
923 | return rc; | 926 | return rc; |
924 | } | 927 | } |
@@ -1029,24 +1032,22 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) | |||
1029 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); | 1032 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); |
1030 | 1033 | ||
1031 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { | 1034 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
1032 | PRINT_WARN("set adapter parameters not supported " | 1035 | dev_info(&card->gdev->dev, |
1033 | "on device %s.\n", | 1036 | "set adapter parameters not supported.\n"); |
1034 | CARD_BUS_ID(card)); | ||
1035 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); | 1037 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); |
1036 | return 0; | 1038 | return 0; |
1037 | } | 1039 | } |
1038 | rc = qeth_query_setadapterparms(card); | 1040 | rc = qeth_query_setadapterparms(card); |
1039 | if (rc) { | 1041 | if (rc) { |
1040 | PRINT_WARN("couldn't set adapter parameters on device %s: " | 1042 | QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " |
1041 | "x%x\n", CARD_BUS_ID(card), rc); | 1043 | "0x%x\n", card->gdev->dev.bus_id, rc); |
1042 | return rc; | 1044 | return rc; |
1043 | } | 1045 | } |
1044 | if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { | 1046 | if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { |
1045 | rc = qeth_setadpparms_change_macaddr(card); | 1047 | rc = qeth_setadpparms_change_macaddr(card); |
1046 | if (rc) | 1048 | if (rc) |
1047 | PRINT_WARN("couldn't get MAC address on " | 1049 | dev_warn(&card->gdev->dev, "Reading the adapter MAC" |
1048 | "device %s: x%x\n", | 1050 | " address failed\n", rc); |
1049 | CARD_BUS_ID(card), rc); | ||
1050 | } | 1051 | } |
1051 | 1052 | ||
1052 | if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 1053 | if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
@@ -1160,16 +1161,17 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) | |||
1160 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); | 1161 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); |
1161 | 1162 | ||
1162 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 1163 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
1163 | PRINT_WARN("ARP processing not supported " | 1164 | dev_info(&card->gdev->dev, |
1164 | "on %s!\n", QETH_CARD_IFNAME(card)); | 1165 | "ARP processing not supported on %s!\n", |
1166 | QETH_CARD_IFNAME(card)); | ||
1165 | return 0; | 1167 | return 0; |
1166 | } | 1168 | } |
1167 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 1169 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
1168 | IPA_CMD_ASS_START, 0); | 1170 | IPA_CMD_ASS_START, 0); |
1169 | if (rc) { | 1171 | if (rc) { |
1170 | PRINT_WARN("Could not start ARP processing " | 1172 | dev_warn(&card->gdev->dev, |
1171 | "assist on %s: 0x%x\n", | 1173 | "Starting ARP processing support for %s failed\n", |
1172 | QETH_CARD_IFNAME(card), rc); | 1174 | QETH_CARD_IFNAME(card)); |
1173 | } | 1175 | } |
1174 | return rc; | 1176 | return rc; |
1175 | } | 1177 | } |
@@ -1181,19 +1183,21 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) | |||
1181 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); | 1183 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); |
1182 | 1184 | ||
1183 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { | 1185 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { |
1184 | PRINT_INFO("Hardware IP fragmentation not supported on %s\n", | 1186 | dev_info(&card->gdev->dev, |
1185 | QETH_CARD_IFNAME(card)); | 1187 | "Hardware IP fragmentation not supported on %s\n", |
1188 | QETH_CARD_IFNAME(card)); | ||
1186 | return -EOPNOTSUPP; | 1189 | return -EOPNOTSUPP; |
1187 | } | 1190 | } |
1188 | 1191 | ||
1189 | rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, | 1192 | rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, |
1190 | IPA_CMD_ASS_START, 0); | 1193 | IPA_CMD_ASS_START, 0); |
1191 | if (rc) { | 1194 | if (rc) { |
1192 | PRINT_WARN("Could not start Hardware IP fragmentation " | 1195 | dev_warn(&card->gdev->dev, |
1193 | "assist on %s: 0x%x\n", | 1196 | "Starting IP fragmentation support for %s failed\n", |
1194 | QETH_CARD_IFNAME(card), rc); | 1197 | QETH_CARD_IFNAME(card)); |
1195 | } else | 1198 | } else |
1196 | PRINT_INFO("Hardware IP fragmentation enabled \n"); | 1199 | dev_info(&card->gdev->dev, |
1200 | "Hardware IP fragmentation enabled \n"); | ||
1197 | return rc; | 1201 | return rc; |
1198 | } | 1202 | } |
1199 | 1203 | ||
@@ -1207,17 +1211,18 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) | |||
1207 | return -EOPNOTSUPP; | 1211 | return -EOPNOTSUPP; |
1208 | 1212 | ||
1209 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { | 1213 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { |
1210 | PRINT_INFO("Inbound source address not " | 1214 | dev_info(&card->gdev->dev, |
1211 | "supported on %s\n", QETH_CARD_IFNAME(card)); | 1215 | "Inbound source address not supported on %s\n", |
1216 | QETH_CARD_IFNAME(card)); | ||
1212 | return -EOPNOTSUPP; | 1217 | return -EOPNOTSUPP; |
1213 | } | 1218 | } |
1214 | 1219 | ||
1215 | rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, | 1220 | rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, |
1216 | IPA_CMD_ASS_START, 0); | 1221 | IPA_CMD_ASS_START, 0); |
1217 | if (rc) | 1222 | if (rc) |
1218 | PRINT_WARN("Could not start inbound source " | 1223 | dev_warn(&card->gdev->dev, |
1219 | "assist on %s: 0x%x\n", | 1224 | "Starting proxy ARP support for %s failed\n", |
1220 | QETH_CARD_IFNAME(card), rc); | 1225 | QETH_CARD_IFNAME(card)); |
1221 | return rc; | 1226 | return rc; |
1222 | } | 1227 | } |
1223 | 1228 | ||
@@ -1228,19 +1233,19 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) | |||
1228 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); | 1233 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); |
1229 | 1234 | ||
1230 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { | 1235 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { |
1231 | PRINT_WARN("VLAN not supported on %s\n", | 1236 | dev_info(&card->gdev->dev, |
1232 | QETH_CARD_IFNAME(card)); | 1237 | "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); |
1233 | return -EOPNOTSUPP; | 1238 | return -EOPNOTSUPP; |
1234 | } | 1239 | } |
1235 | 1240 | ||
1236 | rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, | 1241 | rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, |
1237 | IPA_CMD_ASS_START, 0); | 1242 | IPA_CMD_ASS_START, 0); |
1238 | if (rc) { | 1243 | if (rc) { |
1239 | PRINT_WARN("Could not start vlan " | 1244 | dev_warn(&card->gdev->dev, |
1240 | "assist on %s: 0x%x\n", | 1245 | "Starting VLAN support for %s failed\n", |
1241 | QETH_CARD_IFNAME(card), rc); | 1246 | QETH_CARD_IFNAME(card)); |
1242 | } else { | 1247 | } else { |
1243 | PRINT_INFO("VLAN enabled \n"); | 1248 | dev_info(&card->gdev->dev, "VLAN enabled\n"); |
1244 | } | 1249 | } |
1245 | return rc; | 1250 | return rc; |
1246 | } | 1251 | } |
@@ -1252,19 +1257,20 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) | |||
1252 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); | 1257 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); |
1253 | 1258 | ||
1254 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { | 1259 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { |
1255 | PRINT_WARN("Multicast not supported on %s\n", | 1260 | dev_info(&card->gdev->dev, |
1256 | QETH_CARD_IFNAME(card)); | 1261 | "Multicast not supported on %s\n", |
1262 | QETH_CARD_IFNAME(card)); | ||
1257 | return -EOPNOTSUPP; | 1263 | return -EOPNOTSUPP; |
1258 | } | 1264 | } |
1259 | 1265 | ||
1260 | rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, | 1266 | rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, |
1261 | IPA_CMD_ASS_START, 0); | 1267 | IPA_CMD_ASS_START, 0); |
1262 | if (rc) { | 1268 | if (rc) { |
1263 | PRINT_WARN("Could not start multicast " | 1269 | dev_warn(&card->gdev->dev, |
1264 | "assist on %s: rc=%i\n", | 1270 | "Starting multicast support for %s failed\n", |
1265 | QETH_CARD_IFNAME(card), rc); | 1271 | QETH_CARD_IFNAME(card)); |
1266 | } else { | 1272 | } else { |
1267 | PRINT_INFO("Multicast enabled\n"); | 1273 | dev_info(&card->gdev->dev, "Multicast enabled\n"); |
1268 | card->dev->flags |= IFF_MULTICAST; | 1274 | card->dev->flags |= IFF_MULTICAST; |
1269 | } | 1275 | } |
1270 | return rc; | 1276 | return rc; |
@@ -1315,36 +1321,37 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) | |||
1315 | 1321 | ||
1316 | rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); | 1322 | rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); |
1317 | if (rc) { | 1323 | if (rc) { |
1318 | PRINT_ERR("IPv6 query ipassist failed on %s\n", | 1324 | dev_err(&card->gdev->dev, |
1319 | QETH_CARD_IFNAME(card)); | 1325 | "Activating IPv6 support for %s failed\n", |
1326 | QETH_CARD_IFNAME(card)); | ||
1320 | return rc; | 1327 | return rc; |
1321 | } | 1328 | } |
1322 | rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, | 1329 | rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, |
1323 | IPA_CMD_ASS_START, 3); | 1330 | IPA_CMD_ASS_START, 3); |
1324 | if (rc) { | 1331 | if (rc) { |
1325 | PRINT_WARN("IPv6 start assist (version 4) failed " | 1332 | dev_err(&card->gdev->dev, |
1326 | "on %s: 0x%x\n", | 1333 | "Activating IPv6 support for %s failed\n", |
1327 | QETH_CARD_IFNAME(card), rc); | 1334 | QETH_CARD_IFNAME(card)); |
1328 | return rc; | 1335 | return rc; |
1329 | } | 1336 | } |
1330 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, | 1337 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, |
1331 | IPA_CMD_ASS_START); | 1338 | IPA_CMD_ASS_START); |
1332 | if (rc) { | 1339 | if (rc) { |
1333 | PRINT_WARN("IPV6 start assist (version 6) failed " | 1340 | dev_err(&card->gdev->dev, |
1334 | "on %s: 0x%x\n", | 1341 | "Activating IPv6 support for %s failed\n", |
1335 | QETH_CARD_IFNAME(card), rc); | 1342 | QETH_CARD_IFNAME(card)); |
1336 | return rc; | 1343 | return rc; |
1337 | } | 1344 | } |
1338 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, | 1345 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, |
1339 | IPA_CMD_ASS_START); | 1346 | IPA_CMD_ASS_START); |
1340 | if (rc) { | 1347 | if (rc) { |
1341 | PRINT_WARN("Could not enable passthrough " | 1348 | dev_warn(&card->gdev->dev, |
1342 | "on %s: 0x%x\n", | 1349 | "Enabling the passthrough mode for %s failed\n", |
1343 | QETH_CARD_IFNAME(card), rc); | 1350 | QETH_CARD_IFNAME(card)); |
1344 | return rc; | 1351 | return rc; |
1345 | } | 1352 | } |
1346 | out: | 1353 | out: |
1347 | PRINT_INFO("IPV6 enabled \n"); | 1354 | dev_info(&card->gdev->dev, "IPV6 enabled\n"); |
1348 | return 0; | 1355 | return 0; |
1349 | } | 1356 | } |
1350 | #endif | 1357 | #endif |
@@ -1356,8 +1363,8 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) | |||
1356 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); | 1363 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); |
1357 | 1364 | ||
1358 | if (!qeth_is_supported(card, IPA_IPV6)) { | 1365 | if (!qeth_is_supported(card, IPA_IPV6)) { |
1359 | PRINT_WARN("IPv6 not supported on %s\n", | 1366 | dev_info(&card->gdev->dev, |
1360 | QETH_CARD_IFNAME(card)); | 1367 | "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); |
1361 | return 0; | 1368 | return 0; |
1362 | } | 1369 | } |
1363 | #ifdef CONFIG_QETH_IPV6 | 1370 | #ifdef CONFIG_QETH_IPV6 |
@@ -1373,34 +1380,35 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) | |||
1373 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); | 1380 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); |
1374 | card->info.broadcast_capable = 0; | 1381 | card->info.broadcast_capable = 0; |
1375 | if (!qeth_is_supported(card, IPA_FILTERING)) { | 1382 | if (!qeth_is_supported(card, IPA_FILTERING)) { |
1376 | PRINT_WARN("Broadcast not supported on %s\n", | 1383 | dev_info(&card->gdev->dev, |
1377 | QETH_CARD_IFNAME(card)); | 1384 | "Broadcast not supported on %s\n", |
1385 | QETH_CARD_IFNAME(card)); | ||
1378 | rc = -EOPNOTSUPP; | 1386 | rc = -EOPNOTSUPP; |
1379 | goto out; | 1387 | goto out; |
1380 | } | 1388 | } |
1381 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1389 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1382 | IPA_CMD_ASS_START, 0); | 1390 | IPA_CMD_ASS_START, 0); |
1383 | if (rc) { | 1391 | if (rc) { |
1384 | PRINT_WARN("Could not enable broadcasting filtering " | 1392 | dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " |
1385 | "on %s: 0x%x\n", | 1393 | "%s failed\n", QETH_CARD_IFNAME(card)); |
1386 | QETH_CARD_IFNAME(card), rc); | ||
1387 | goto out; | 1394 | goto out; |
1388 | } | 1395 | } |
1389 | 1396 | ||
1390 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1397 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1391 | IPA_CMD_ASS_CONFIGURE, 1); | 1398 | IPA_CMD_ASS_CONFIGURE, 1); |
1392 | if (rc) { | 1399 | if (rc) { |
1393 | PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n", | 1400 | dev_warn(&card->gdev->dev, |
1394 | QETH_CARD_IFNAME(card), rc); | 1401 | "Setting up broadcast filtering for %s failed\n", |
1402 | QETH_CARD_IFNAME(card)); | ||
1395 | goto out; | 1403 | goto out; |
1396 | } | 1404 | } |
1397 | card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; | 1405 | card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; |
1398 | PRINT_INFO("Broadcast enabled \n"); | 1406 | dev_info(&card->gdev->dev, "Broadcast enabled\n"); |
1399 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1407 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1400 | IPA_CMD_ASS_ENABLE, 1); | 1408 | IPA_CMD_ASS_ENABLE, 1); |
1401 | if (rc) { | 1409 | if (rc) { |
1402 | PRINT_WARN("Could not set up broadcast echo filtering on " | 1410 | dev_warn(&card->gdev->dev, "Setting up broadcast echo " |
1403 | "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc); | 1411 | "filtering for %s failed\n", QETH_CARD_IFNAME(card)); |
1404 | goto out; | 1412 | goto out; |
1405 | } | 1413 | } |
1406 | card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; | 1414 | card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; |
@@ -1419,18 +1427,18 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card) | |||
1419 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, | 1427 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, |
1420 | IPA_CMD_ASS_START, 0); | 1428 | IPA_CMD_ASS_START, 0); |
1421 | if (rc) { | 1429 | if (rc) { |
1422 | PRINT_WARN("Starting Inbound HW Checksumming failed on %s: " | 1430 | dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " |
1423 | "0x%x,\ncontinuing using Inbound SW Checksumming\n", | 1431 | "failed, using SW checksumming\n", |
1424 | QETH_CARD_IFNAME(card), rc); | 1432 | QETH_CARD_IFNAME(card)); |
1425 | return rc; | 1433 | return rc; |
1426 | } | 1434 | } |
1427 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, | 1435 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, |
1428 | IPA_CMD_ASS_ENABLE, | 1436 | IPA_CMD_ASS_ENABLE, |
1429 | card->info.csum_mask); | 1437 | card->info.csum_mask); |
1430 | if (rc) { | 1438 | if (rc) { |
1431 | PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: " | 1439 | dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " |
1432 | "0x%x,\ncontinuing using Inbound SW Checksumming\n", | 1440 | "failed, using SW checksumming\n", |
1433 | QETH_CARD_IFNAME(card), rc); | 1441 | QETH_CARD_IFNAME(card)); |
1434 | return rc; | 1442 | return rc; |
1435 | } | 1443 | } |
1436 | return 0; | 1444 | return 0; |
@@ -1443,26 +1451,30 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) | |||
1443 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); | 1451 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); |
1444 | 1452 | ||
1445 | if (card->options.checksum_type == NO_CHECKSUMMING) { | 1453 | if (card->options.checksum_type == NO_CHECKSUMMING) { |
1446 | PRINT_WARN("Using no checksumming on %s.\n", | 1454 | dev_info(&card->gdev->dev, |
1447 | QETH_CARD_IFNAME(card)); | 1455 | "Using no checksumming on %s.\n", |
1456 | QETH_CARD_IFNAME(card)); | ||
1448 | return 0; | 1457 | return 0; |
1449 | } | 1458 | } |
1450 | if (card->options.checksum_type == SW_CHECKSUMMING) { | 1459 | if (card->options.checksum_type == SW_CHECKSUMMING) { |
1451 | PRINT_WARN("Using SW checksumming on %s.\n", | 1460 | dev_info(&card->gdev->dev, |
1452 | QETH_CARD_IFNAME(card)); | 1461 | "Using SW checksumming on %s.\n", |
1462 | QETH_CARD_IFNAME(card)); | ||
1453 | return 0; | 1463 | return 0; |
1454 | } | 1464 | } |
1455 | if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { | 1465 | if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { |
1456 | PRINT_WARN("Inbound HW Checksumming not " | 1466 | dev_info(&card->gdev->dev, |
1457 | "supported on %s,\ncontinuing " | 1467 | "Inbound HW Checksumming not " |
1458 | "using Inbound SW Checksumming\n", | 1468 | "supported on %s,\ncontinuing " |
1459 | QETH_CARD_IFNAME(card)); | 1469 | "using Inbound SW Checksumming\n", |
1470 | QETH_CARD_IFNAME(card)); | ||
1460 | card->options.checksum_type = SW_CHECKSUMMING; | 1471 | card->options.checksum_type = SW_CHECKSUMMING; |
1461 | return 0; | 1472 | return 0; |
1462 | } | 1473 | } |
1463 | rc = qeth_l3_send_checksum_command(card); | 1474 | rc = qeth_l3_send_checksum_command(card); |
1464 | if (!rc) | 1475 | if (!rc) |
1465 | PRINT_INFO("HW Checksumming (inbound) enabled \n"); | 1476 | dev_info(&card->gdev->dev, |
1477 | "HW Checksumming (inbound) enabled\n"); | ||
1466 | 1478 | ||
1467 | return rc; | 1479 | return rc; |
1468 | } | 1480 | } |
@@ -1474,18 +1486,20 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1474 | QETH_DBF_TEXT(TRACE, 3, "sttso"); | 1486 | QETH_DBF_TEXT(TRACE, 3, "sttso"); |
1475 | 1487 | ||
1476 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { | 1488 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { |
1477 | PRINT_WARN("Outbound TSO not supported on %s\n", | 1489 | dev_info(&card->gdev->dev, |
1478 | QETH_CARD_IFNAME(card)); | 1490 | "Outbound TSO not supported on %s\n", |
1491 | QETH_CARD_IFNAME(card)); | ||
1479 | rc = -EOPNOTSUPP; | 1492 | rc = -EOPNOTSUPP; |
1480 | } else { | 1493 | } else { |
1481 | rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, | 1494 | rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, |
1482 | IPA_CMD_ASS_START, 0); | 1495 | IPA_CMD_ASS_START, 0); |
1483 | if (rc) | 1496 | if (rc) |
1484 | PRINT_WARN("Could not start outbound TSO " | 1497 | dev_warn(&card->gdev->dev, "Starting outbound TCP " |
1485 | "assist on %s: rc=%i\n", | 1498 | "segmentation offload for %s failed\n", |
1486 | QETH_CARD_IFNAME(card), rc); | 1499 | QETH_CARD_IFNAME(card)); |
1487 | else | 1500 | else |
1488 | PRINT_INFO("Outbound TSO enabled\n"); | 1501 | dev_info(&card->gdev->dev, |
1502 | "Outbound TSO enabled\n"); | ||
1489 | } | 1503 | } |
1490 | if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { | 1504 | if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { |
1491 | card->options.large_send = QETH_LARGE_SEND_NO; | 1505 | card->options.large_send = QETH_LARGE_SEND_NO; |
@@ -1578,12 +1592,8 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card, | |||
1578 | else { | 1592 | else { |
1579 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | | 1593 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | |
1580 | UNIQUE_ID_NOT_BY_CARD; | 1594 | UNIQUE_ID_NOT_BY_CARD; |
1581 | PRINT_WARN("couldn't get a unique id from the card on device " | 1595 | dev_warn(&card->gdev->dev, "The network adapter failed to " |
1582 | "%s (result=x%x), using default id. ipv6 " | 1596 | "generate a unique ID\n"); |
1583 | "autoconfig on other lpars may lead to duplicate " | ||
1584 | "ip addresses. please use manually " | ||
1585 | "configured ones.\n", | ||
1586 | CARD_BUS_ID(card), cmd->hdr.return_code); | ||
1587 | } | 1597 | } |
1588 | return 0; | 1598 | return 0; |
1589 | } | 1599 | } |
@@ -3086,9 +3096,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3086 | if (rc) { | 3096 | if (rc) { |
3087 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 3097 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3088 | if (rc == 0xe080) { | 3098 | if (rc == 0xe080) { |
3089 | PRINT_WARN("LAN on card %s if offline! " | 3099 | dev_warn(&card->gdev->dev, |
3090 | "Waiting for STARTLAN from card.\n", | 3100 | "The LAN is offline\n"); |
3091 | CARD_BUS_ID(card)); | ||
3092 | card->lan_online = 0; | 3101 | card->lan_online = 0; |
3093 | } | 3102 | } |
3094 | return rc; | 3103 | return rc; |
@@ -3194,8 +3203,8 @@ static int qeth_l3_recover(void *ptr) | |||
3194 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 3203 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
3195 | return 0; | 3204 | return 0; |
3196 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 3205 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
3197 | PRINT_WARN("Recovery of device %s started ...\n", | 3206 | dev_warn(&card->gdev->dev, |
3198 | CARD_BUS_ID(card)); | 3207 | "A recovery process has been started for the device\n"); |
3199 | card->use_hard_stop = 1; | 3208 | card->use_hard_stop = 1; |
3200 | __qeth_l3_set_offline(card->gdev, 1); | 3209 | __qeth_l3_set_offline(card->gdev, 1); |
3201 | rc = __qeth_l3_set_online(card->gdev, 1); | 3210 | rc = __qeth_l3_set_online(card->gdev, 1); |
@@ -3203,14 +3212,14 @@ static int qeth_l3_recover(void *ptr) | |||
3203 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3212 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3204 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3213 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3205 | if (!rc) | 3214 | if (!rc) |
3206 | PRINT_INFO("Device %s successfully recovered!\n", | 3215 | dev_info(&card->gdev->dev, |
3207 | CARD_BUS_ID(card)); | 3216 | "Device successfully recovered!\n"); |
3208 | else { | 3217 | else { |
3209 | rtnl_lock(); | 3218 | rtnl_lock(); |
3210 | dev_close(card->dev); | 3219 | dev_close(card->dev); |
3211 | rtnl_unlock(); | 3220 | rtnl_unlock(); |
3212 | PRINT_INFO("Device %s could not be recovered!\n", | 3221 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3213 | CARD_BUS_ID(card)); | 3222 | "failed to recover an error on the device\n"); |
3214 | } | 3223 | } |
3215 | return 0; | 3224 | return 0; |
3216 | } | 3225 | } |
@@ -3344,7 +3353,7 @@ static int qeth_l3_register_notifiers(void) | |||
3344 | return rc; | 3353 | return rc; |
3345 | } | 3354 | } |
3346 | #else | 3355 | #else |
3347 | PRINT_WARN("layer 3 discipline no IPv6 support\n"); | 3356 | pr_warning("There is no IPv6 support for the layer 3 discipline\n"); |
3348 | #endif | 3357 | #endif |
3349 | return 0; | 3358 | return 0; |
3350 | } | 3359 | } |
@@ -3363,7 +3372,7 @@ static int __init qeth_l3_init(void) | |||
3363 | { | 3372 | { |
3364 | int rc = 0; | 3373 | int rc = 0; |
3365 | 3374 | ||
3366 | PRINT_INFO("register layer 3 discipline\n"); | 3375 | pr_info("register layer 3 discipline\n"); |
3367 | rc = qeth_l3_register_notifiers(); | 3376 | rc = qeth_l3_register_notifiers(); |
3368 | return rc; | 3377 | return rc; |
3369 | } | 3378 | } |
@@ -3371,7 +3380,7 @@ static int __init qeth_l3_init(void) | |||
3371 | static void __exit qeth_l3_exit(void) | 3380 | static void __exit qeth_l3_exit(void) |
3372 | { | 3381 | { |
3373 | qeth_l3_unregister_notifiers(); | 3382 | qeth_l3_unregister_notifiers(); |
3374 | PRINT_INFO("unregister layer 3 discipline\n"); | 3383 | pr_info("unregister layer 3 discipline\n"); |
3375 | } | 3384 | } |
3376 | 3385 | ||
3377 | module_init(qeth_l3_init); | 3386 | module_init(qeth_l3_init); |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 3d4e3e3f3fc0..e529b55b3ce9 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -25,9 +25,15 @@ | |||
25 | * Sven Schuetz | 25 | * Sven Schuetz |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "zfcp" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/miscdevice.h> | 31 | #include <linux/miscdevice.h> |
32 | #include <linux/seq_file.h> | ||
29 | #include "zfcp_ext.h" | 33 | #include "zfcp_ext.h" |
30 | 34 | ||
35 | #define ZFCP_BUS_ID_SIZE 20 | ||
36 | |||
31 | static char *device; | 37 | static char *device; |
32 | 38 | ||
33 | MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); | 39 | MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); |
@@ -83,9 +89,9 @@ static int __init zfcp_device_setup(char *devstr) | |||
83 | strcpy(str, devstr); | 89 | strcpy(str, devstr); |
84 | 90 | ||
85 | token = strsep(&str, ","); | 91 | token = strsep(&str, ","); |
86 | if (!token || strlen(token) >= BUS_ID_SIZE) | 92 | if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) |
87 | goto err_out; | 93 | goto err_out; |
88 | strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); | 94 | strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE); |
89 | 95 | ||
90 | token = strsep(&str, ","); | 96 | token = strsep(&str, ","); |
91 | if (!token || strict_strtoull(token, 0, | 97 | if (!token || strict_strtoull(token, 0, |
@@ -102,7 +108,7 @@ static int __init zfcp_device_setup(char *devstr) | |||
102 | 108 | ||
103 | err_out: | 109 | err_out: |
104 | kfree(str); | 110 | kfree(str); |
105 | pr_err("zfcp: %s is not a valid SCSI device\n", devstr); | 111 | pr_err("%s is not a valid SCSI device\n", devstr); |
106 | return 0; | 112 | return 0; |
107 | } | 113 | } |
108 | 114 | ||
@@ -186,13 +192,13 @@ static int __init zfcp_module_init(void) | |||
186 | 192 | ||
187 | retval = misc_register(&zfcp_cfdc_misc); | 193 | retval = misc_register(&zfcp_cfdc_misc); |
188 | if (retval) { | 194 | if (retval) { |
189 | pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n"); | 195 | pr_err("Registering the misc device zfcp_cfdc failed\n"); |
190 | goto out_misc; | 196 | goto out_misc; |
191 | } | 197 | } |
192 | 198 | ||
193 | retval = zfcp_ccw_register(); | 199 | retval = zfcp_ccw_register(); |
194 | if (retval) { | 200 | if (retval) { |
195 | pr_err("zfcp: The zfcp device driver could not register with " | 201 | pr_err("The zfcp device driver could not register with " |
196 | "the common I/O layer\n"); | 202 | "the common I/O layer\n"); |
197 | goto out_ccw_register; | 203 | goto out_ccw_register; |
198 | } | 204 | } |
@@ -436,6 +442,16 @@ static void _zfcp_status_read_scheduler(struct work_struct *work) | |||
436 | stat_work)); | 442 | stat_work)); |
437 | } | 443 | } |
438 | 444 | ||
445 | static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) | ||
446 | { | ||
447 | struct zfcp_adapter *adapter = | ||
448 | container_of(sl, struct zfcp_adapter, service_level); | ||
449 | |||
450 | seq_printf(m, "zfcp: %s microcode level %x\n", | ||
451 | dev_name(&adapter->ccw_device->dev), | ||
452 | adapter->fsf_lic_version); | ||
453 | } | ||
454 | |||
439 | /** | 455 | /** |
440 | * zfcp_adapter_enqueue - enqueue a new adapter to the list | 456 | * zfcp_adapter_enqueue - enqueue a new adapter to the list |
441 | * @ccw_device: pointer to the struct cc_device | 457 | * @ccw_device: pointer to the struct cc_device |
@@ -500,6 +516,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
500 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); | 516 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); |
501 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); | 517 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); |
502 | 518 | ||
519 | adapter->service_level.seq_print = zfcp_print_sl; | ||
520 | |||
503 | /* mark adapter unusable as long as sysfs registration is not complete */ | 521 | /* mark adapter unusable as long as sysfs registration is not complete */ |
504 | atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); | 522 | atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); |
505 | 523 | ||
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 951a8d409d1d..728147131e1d 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | /** | 14 | /** |
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index ec2abceca6dc..f1a7518e67ed 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Copyright IBM Corporation 2008 | 7 | * Copyright IBM Corporation 2008 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "zfcp" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/types.h> | 13 | #include <linux/types.h> |
11 | #include <linux/miscdevice.h> | 14 | #include <linux/miscdevice.h> |
12 | #include <asm/ccwdev.h> | 15 | #include <asm/ccwdev.h> |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 31012d58cfb7..735d675623f8 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
10 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
11 | #include "zfcp_ext.h" | 14 | #include "zfcp_ext.h" |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9ce4c75bd190..e19e46ae4a68 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/qdio.h> | 33 | #include <asm/qdio.h> |
34 | #include <asm/debug.h> | 34 | #include <asm/debug.h> |
35 | #include <asm/ebcdic.h> | 35 | #include <asm/ebcdic.h> |
36 | #include <asm/sysinfo.h> | ||
36 | #include "zfcp_dbf.h" | 37 | #include "zfcp_dbf.h" |
37 | #include "zfcp_fsf.h" | 38 | #include "zfcp_fsf.h" |
38 | 39 | ||
@@ -515,6 +516,7 @@ struct zfcp_adapter { | |||
515 | struct fsf_qtcb_bottom_port *stats_reset_data; | 516 | struct fsf_qtcb_bottom_port *stats_reset_data; |
516 | unsigned long stats_reset; | 517 | unsigned long stats_reset; |
517 | struct work_struct scan_work; | 518 | struct work_struct scan_work; |
519 | struct service_level service_level; | ||
518 | atomic_t qdio_outb_full; /* queue full incidents */ | 520 | atomic_t qdio_outb_full; /* queue full incidents */ |
519 | }; | 521 | }; |
520 | 522 | ||
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c557ba34e1aa..4ed4950d994b 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | #define ZFCP_MAX_ERPS 3 | 14 | #define ZFCP_MAX_ERPS 3 |
@@ -1281,10 +1284,13 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) | |||
1281 | break; | 1284 | break; |
1282 | 1285 | ||
1283 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 1286 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
1284 | if (result != ZFCP_ERP_SUCCEEDED) | 1287 | if (result != ZFCP_ERP_SUCCEEDED) { |
1288 | unregister_service_level(&adapter->service_level); | ||
1285 | zfcp_erp_rports_del(adapter); | 1289 | zfcp_erp_rports_del(adapter); |
1286 | else | 1290 | } else { |
1291 | register_service_level(&adapter->service_level); | ||
1287 | schedule_work(&adapter->scan_work); | 1292 | schedule_work(&adapter->scan_work); |
1293 | } | ||
1288 | zfcp_adapter_put(adapter); | 1294 | zfcp_adapter_put(adapter); |
1289 | break; | 1295 | break; |
1290 | } | 1296 | } |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 8aab3091a7b1..f009f2a7ec3e 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2008 | 6 | * Copyright IBM Corporation 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | struct ct_iu_gpn_ft_req { | 14 | struct ct_iu_gpn_ft_req { |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index dc0367690405..9c72e083559d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/blktrace_api.h> | 12 | #include <linux/blktrace_api.h> |
10 | #include "zfcp_ext.h" | 13 | #include "zfcp_ext.h" |
11 | 14 | ||
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 664752f90b20..d3b55fb66f13 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ | 14 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 468c880f8b6d..9dc42a68fbdd 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
11 | 14 | ||
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index ca9293ba1766..899af2b45b1e 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2008 | 6 | * Copyright IBM Corporation 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ | 14 | #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ |
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index c3e4ab07b9cc..0eea90781385 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -1,17 +1,21 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/sysinfo.c | 2 | * drivers/s390/sysinfo.c |
3 | * | 3 | * |
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright IBM Corp. 2001, 2008 |
5 | * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) | 5 | * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
6 | */ | 7 | */ |
7 | 8 | ||
8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
9 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
10 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> | ||
11 | #include <linux/init.h> | 13 | #include <linux/init.h> |
12 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/module.h> | ||
13 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
14 | #include <asm/sysinfo.h> | 17 | #include <asm/sysinfo.h> |
18 | #include <asm/cpcmd.h> | ||
15 | 19 | ||
16 | /* Sigh, math-emu. Don't ask. */ | 20 | /* Sigh, math-emu. Don't ask. */ |
17 | #include <asm/sfp-util.h> | 21 | #include <asm/sfp-util.h> |
@@ -271,6 +275,125 @@ static __init int create_proc_sysinfo(void) | |||
271 | 275 | ||
272 | __initcall(create_proc_sysinfo); | 276 | __initcall(create_proc_sysinfo); |
273 | 277 | ||
278 | /* | ||
279 | * Service levels interface. | ||
280 | */ | ||
281 | |||
282 | static DECLARE_RWSEM(service_level_sem); | ||
283 | static LIST_HEAD(service_level_list); | ||
284 | |||
285 | int register_service_level(struct service_level *slr) | ||
286 | { | ||
287 | struct service_level *ptr; | ||
288 | |||
289 | down_write(&service_level_sem); | ||
290 | list_for_each_entry(ptr, &service_level_list, list) | ||
291 | if (ptr == slr) { | ||
292 | up_write(&service_level_sem); | ||
293 | return -EEXIST; | ||
294 | } | ||
295 | list_add_tail(&slr->list, &service_level_list); | ||
296 | up_write(&service_level_sem); | ||
297 | return 0; | ||
298 | } | ||
299 | EXPORT_SYMBOL(register_service_level); | ||
300 | |||
301 | int unregister_service_level(struct service_level *slr) | ||
302 | { | ||
303 | struct service_level *ptr, *next; | ||
304 | int rc = -ENOENT; | ||
305 | |||
306 | down_write(&service_level_sem); | ||
307 | list_for_each_entry_safe(ptr, next, &service_level_list, list) { | ||
308 | if (ptr != slr) | ||
309 | continue; | ||
310 | list_del(&ptr->list); | ||
311 | rc = 0; | ||
312 | break; | ||
313 | } | ||
314 | up_write(&service_level_sem); | ||
315 | return rc; | ||
316 | } | ||
317 | EXPORT_SYMBOL(unregister_service_level); | ||
318 | |||
319 | static void *service_level_start(struct seq_file *m, loff_t *pos) | ||
320 | { | ||
321 | down_read(&service_level_sem); | ||
322 | return seq_list_start(&service_level_list, *pos); | ||
323 | } | ||
324 | |||
325 | static void *service_level_next(struct seq_file *m, void *p, loff_t *pos) | ||
326 | { | ||
327 | return seq_list_next(p, &service_level_list, pos); | ||
328 | } | ||
329 | |||
330 | static void service_level_stop(struct seq_file *m, void *p) | ||
331 | { | ||
332 | up_read(&service_level_sem); | ||
333 | } | ||
334 | |||
335 | static int service_level_show(struct seq_file *m, void *p) | ||
336 | { | ||
337 | struct service_level *slr; | ||
338 | |||
339 | slr = list_entry(p, struct service_level, list); | ||
340 | slr->seq_print(m, slr); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static const struct seq_operations service_level_seq_ops = { | ||
345 | .start = service_level_start, | ||
346 | .next = service_level_next, | ||
347 | .stop = service_level_stop, | ||
348 | .show = service_level_show | ||
349 | }; | ||
350 | |||
351 | static int service_level_open(struct inode *inode, struct file *file) | ||
352 | { | ||
353 | return seq_open(file, &service_level_seq_ops); | ||
354 | } | ||
355 | |||
356 | static const struct file_operations service_level_ops = { | ||
357 | .open = service_level_open, | ||
358 | .read = seq_read, | ||
359 | .llseek = seq_lseek, | ||
360 | .release = seq_release | ||
361 | }; | ||
362 | |||
363 | static void service_level_vm_print(struct seq_file *m, | ||
364 | struct service_level *slr) | ||
365 | { | ||
366 | char *query_buffer, *str; | ||
367 | |||
368 | query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); | ||
369 | if (!query_buffer) | ||
370 | return; | ||
371 | cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL); | ||
372 | str = strchr(query_buffer, '\n'); | ||
373 | if (str) | ||
374 | *str = 0; | ||
375 | seq_printf(m, "VM: %s\n", query_buffer); | ||
376 | kfree(query_buffer); | ||
377 | } | ||
378 | |||
379 | static struct service_level service_level_vm = { | ||
380 | .seq_print = service_level_vm_print | ||
381 | }; | ||
382 | |||
383 | static __init int create_proc_service_level(void) | ||
384 | { | ||
385 | proc_create("service_levels", 0, NULL, &service_level_ops); | ||
386 | if (MACHINE_IS_VM) | ||
387 | register_service_level(&service_level_vm); | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | subsys_initcall(create_proc_service_level); | ||
392 | |||
393 | /* | ||
394 | * Bogomips calculation based on cpu capability. | ||
395 | */ | ||
396 | |||
274 | int get_cpu_capability(unsigned int *capability) | 397 | int get_cpu_capability(unsigned int *capability) |
275 | { | 398 | { |
276 | struct sysinfo_1_2_2 *info; | 399 | struct sysinfo_1_2_2 *info; |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 162cd927d94b..94acbeed4e7c 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -175,8 +175,8 @@ static struct aac_driver_ident aac_drivers[] = { | |||
175 | { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */ | 175 | { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */ |
176 | { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */ | 176 | { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */ |
177 | { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */ | 177 | { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */ |
178 | { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */ | 178 | { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */ |
179 | { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */ | 179 | { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */ |
180 | { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */ | 180 | { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */ |
181 | { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */ | 181 | { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */ |
182 | { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */ | 182 | { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */ |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 2a5b29d12172..e2dd6a45924a 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -864,21 +864,23 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
864 | 864 | ||
865 | INIT_WORK(&vport->crq_work, handle_crq); | 865 | INIT_WORK(&vport->crq_work, handle_crq); |
866 | 866 | ||
867 | err = crq_queue_create(&vport->crq_queue, target); | 867 | err = scsi_add_host(shost, target->dev); |
868 | if (err) | 868 | if (err) |
869 | goto free_srp_target; | 869 | goto free_srp_target; |
870 | 870 | ||
871 | err = scsi_add_host(shost, target->dev); | 871 | err = scsi_tgt_alloc_queue(shost); |
872 | if (err) | 872 | if (err) |
873 | goto destroy_queue; | 873 | goto remove_host; |
874 | 874 | ||
875 | err = scsi_tgt_alloc_queue(shost); | 875 | err = crq_queue_create(&vport->crq_queue, target); |
876 | if (err) | 876 | if (err) |
877 | goto destroy_queue; | 877 | goto free_queue; |
878 | 878 | ||
879 | return 0; | 879 | return 0; |
880 | destroy_queue: | 880 | free_queue: |
881 | crq_queue_destroy(target); | 881 | scsi_tgt_free_queue(shost); |
882 | remove_host: | ||
883 | scsi_remove_host(shost); | ||
882 | free_srp_target: | 884 | free_srp_target: |
883 | srp_target_free(target); | 885 | srp_target_free(target); |
884 | put_host: | 886 | put_host: |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 801c7cf54d2e..3fdee7370ccc 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -489,12 +489,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
489 | if (!__kfifo_get(session->cmdpool.queue, | 489 | if (!__kfifo_get(session->cmdpool.queue, |
490 | (void*)&task, sizeof(void*))) | 490 | (void*)&task, sizeof(void*))) |
491 | return NULL; | 491 | return NULL; |
492 | |||
493 | if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) && | ||
494 | hdr->ttt == RESERVED_ITT) { | ||
495 | conn->ping_task = task; | ||
496 | conn->last_ping = jiffies; | ||
497 | } | ||
498 | } | 492 | } |
499 | /* | 493 | /* |
500 | * released in complete pdu for task we expect a response for, and | 494 | * released in complete pdu for task we expect a response for, and |
@@ -703,6 +697,11 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) | |||
703 | task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); | 697 | task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); |
704 | if (!task) | 698 | if (!task) |
705 | iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); | 699 | iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); |
700 | else if (!rhdr) { | ||
701 | /* only track our nops */ | ||
702 | conn->ping_task = task; | ||
703 | conn->last_ping = jiffies; | ||
704 | } | ||
706 | } | 705 | } |
707 | 706 | ||
708 | static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | 707 | static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fa45a1a66867..148d3af92aef 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -648,8 +648,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) | |||
648 | struct request *req = cmd->request; | 648 | struct request *req = cmd->request; |
649 | unsigned long flags; | 649 | unsigned long flags; |
650 | 650 | ||
651 | scsi_unprep_request(req); | ||
652 | spin_lock_irqsave(q->queue_lock, flags); | 651 | spin_lock_irqsave(q->queue_lock, flags); |
652 | scsi_unprep_request(req); | ||
653 | blk_requeue_request(q, req); | 653 | blk_requeue_request(q, req); |
654 | spin_unlock_irqrestore(q->queue_lock, flags); | 654 | spin_unlock_irqrestore(q->queue_lock, flags); |
655 | 655 | ||
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 165fc010978c..557b54ab2f25 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -51,7 +51,6 @@ | |||
51 | #ifdef CONFIG_SUPERH | 51 | #ifdef CONFIG_SUPERH |
52 | #include <asm/clock.h> | 52 | #include <asm/clock.h> |
53 | #include <asm/sh_bios.h> | 53 | #include <asm/sh_bios.h> |
54 | #include <asm/kgdb.h> | ||
55 | #endif | 54 | #endif |
56 | 55 | ||
57 | #include "sh-sci.h" | 56 | #include "sh-sci.h" |
@@ -65,10 +64,6 @@ struct sci_port { | |||
65 | /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ | 64 | /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ |
66 | unsigned int irqs[SCIx_NR_IRQS]; | 65 | unsigned int irqs[SCIx_NR_IRQS]; |
67 | 66 | ||
68 | /* Port pin configuration */ | ||
69 | void (*init_pins)(struct uart_port *port, | ||
70 | unsigned int cflag); | ||
71 | |||
72 | /* Port enable callback */ | 67 | /* Port enable callback */ |
73 | void (*enable)(struct uart_port *port); | 68 | void (*enable)(struct uart_port *port); |
74 | 69 | ||
@@ -85,10 +80,6 @@ struct sci_port { | |||
85 | #endif | 80 | #endif |
86 | }; | 81 | }; |
87 | 82 | ||
88 | #ifdef CONFIG_SH_KGDB | ||
89 | static struct sci_port *kgdb_sci_port; | ||
90 | #endif | ||
91 | |||
92 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 83 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
93 | static struct sci_port *serial_console_port; | 84 | static struct sci_port *serial_console_port; |
94 | #endif | 85 | #endif |
@@ -101,21 +92,26 @@ static void sci_stop_tx(struct uart_port *port); | |||
101 | static struct sci_port sci_ports[SCI_NPORTS]; | 92 | static struct sci_port sci_ports[SCI_NPORTS]; |
102 | static struct uart_driver sci_uart_driver; | 93 | static struct uart_driver sci_uart_driver; |
103 | 94 | ||
104 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && \ | 95 | static inline struct sci_port * |
105 | defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | 96 | to_sci_port(struct uart_port *uart) |
97 | { | ||
98 | return container_of(uart, struct sci_port, port); | ||
99 | } | ||
100 | |||
101 | #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) | ||
102 | |||
103 | #ifdef CONFIG_CONSOLE_POLL | ||
106 | static inline void handle_error(struct uart_port *port) | 104 | static inline void handle_error(struct uart_port *port) |
107 | { | 105 | { |
108 | /* Clear error flags */ | 106 | /* Clear error flags */ |
109 | sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); | 107 | sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); |
110 | } | 108 | } |
111 | 109 | ||
112 | static int get_char(struct uart_port *port) | 110 | static int sci_poll_get_char(struct uart_port *port) |
113 | { | 111 | { |
114 | unsigned long flags; | ||
115 | unsigned short status; | 112 | unsigned short status; |
116 | int c; | 113 | int c; |
117 | 114 | ||
118 | spin_lock_irqsave(&port->lock, flags); | ||
119 | do { | 115 | do { |
120 | status = sci_in(port, SCxSR); | 116 | status = sci_in(port, SCxSR); |
121 | if (status & SCxSR_ERRORS(port)) { | 117 | if (status & SCxSR_ERRORS(port)) { |
@@ -123,23 +119,21 @@ static int get_char(struct uart_port *port) | |||
123 | continue; | 119 | continue; |
124 | } | 120 | } |
125 | } while (!(status & SCxSR_RDxF(port))); | 121 | } while (!(status & SCxSR_RDxF(port))); |
122 | |||
126 | c = sci_in(port, SCxRDR); | 123 | c = sci_in(port, SCxRDR); |
127 | sci_in(port, SCxSR); /* Dummy read */ | 124 | |
125 | /* Dummy read */ | ||
126 | sci_in(port, SCxSR); | ||
128 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); | 127 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); |
129 | spin_unlock_irqrestore(&port->lock, flags); | ||
130 | 128 | ||
131 | return c; | 129 | return c; |
132 | } | 130 | } |
133 | #endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ | 131 | #endif |
134 | 132 | ||
135 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || defined(CONFIG_SH_KGDB) | 133 | static void sci_poll_put_char(struct uart_port *port, unsigned char c) |
136 | static void put_char(struct uart_port *port, char c) | ||
137 | { | 134 | { |
138 | unsigned long flags; | ||
139 | unsigned short status; | 135 | unsigned short status; |
140 | 136 | ||
141 | spin_lock_irqsave(&port->lock, flags); | ||
142 | |||
143 | do { | 137 | do { |
144 | status = sci_in(port, SCxSR); | 138 | status = sci_in(port, SCxSR); |
145 | } while (!(status & SCxSR_TDxE(port))); | 139 | } while (!(status & SCxSR_TDxE(port))); |
@@ -147,96 +141,22 @@ static void put_char(struct uart_port *port, char c) | |||
147 | sci_in(port, SCxSR); /* Dummy read */ | 141 | sci_in(port, SCxSR); /* Dummy read */ |
148 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); | 142 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); |
149 | sci_out(port, SCxTDR, c); | 143 | sci_out(port, SCxTDR, c); |
150 | |||
151 | spin_unlock_irqrestore(&port->lock, flags); | ||
152 | } | 144 | } |
153 | #endif | 145 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ |
154 | |||
155 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | ||
156 | static void put_string(struct sci_port *sci_port, const char *buffer, int count) | ||
157 | { | ||
158 | struct uart_port *port = &sci_port->port; | ||
159 | const unsigned char *p = buffer; | ||
160 | int i; | ||
161 | |||
162 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | ||
163 | int checksum; | ||
164 | int usegdb=0; | ||
165 | |||
166 | #ifdef CONFIG_SH_STANDARD_BIOS | ||
167 | /* This call only does a trap the first time it is | ||
168 | * called, and so is safe to do here unconditionally | ||
169 | */ | ||
170 | usegdb |= sh_bios_in_gdb_mode(); | ||
171 | #endif | ||
172 | #ifdef CONFIG_SH_KGDB | ||
173 | usegdb |= (kgdb_in_gdb_mode && (sci_port == kgdb_sci_port)); | ||
174 | #endif | ||
175 | |||
176 | if (usegdb) { | ||
177 | /* $<packet info>#<checksum>. */ | ||
178 | do { | ||
179 | unsigned char c; | ||
180 | put_char(port, '$'); | ||
181 | put_char(port, 'O'); /* 'O'utput to console */ | ||
182 | checksum = 'O'; | ||
183 | |||
184 | for (i=0; i<count; i++) { /* Don't use run length encoding */ | ||
185 | int h, l; | ||
186 | |||
187 | c = *p++; | ||
188 | h = hex_asc_hi(c); | ||
189 | l = hex_asc_lo(c); | ||
190 | put_char(port, h); | ||
191 | put_char(port, l); | ||
192 | checksum += h + l; | ||
193 | } | ||
194 | put_char(port, '#'); | ||
195 | put_char(port, hex_asc_hi(checksum)); | ||
196 | put_char(port, hex_asc_lo(checksum)); | ||
197 | } while (get_char(port) != '+'); | ||
198 | } else | ||
199 | #endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ | ||
200 | for (i=0; i<count; i++) { | ||
201 | if (*p == 10) | ||
202 | put_char(port, '\r'); | ||
203 | put_char(port, *p++); | ||
204 | } | ||
205 | } | ||
206 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ | ||
207 | |||
208 | #ifdef CONFIG_SH_KGDB | ||
209 | static int kgdb_sci_getchar(void) | ||
210 | { | ||
211 | int c; | ||
212 | |||
213 | /* Keep trying to read a character, this could be neater */ | ||
214 | while ((c = get_char(&kgdb_sci_port->port)) < 0) | ||
215 | cpu_relax(); | ||
216 | |||
217 | return c; | ||
218 | } | ||
219 | |||
220 | static inline void kgdb_sci_putchar(int c) | ||
221 | { | ||
222 | put_char(&kgdb_sci_port->port, c); | ||
223 | } | ||
224 | #endif /* CONFIG_SH_KGDB */ | ||
225 | 146 | ||
226 | #if defined(__H8300S__) | 147 | #if defined(__H8300S__) |
227 | enum { sci_disable, sci_enable }; | 148 | enum { sci_disable, sci_enable }; |
228 | 149 | ||
229 | static void h8300_sci_config(struct uart_port* port, unsigned int ctrl) | 150 | static void h8300_sci_config(struct uart_port *port, unsigned int ctrl) |
230 | { | 151 | { |
231 | volatile unsigned char *mstpcrl=(volatile unsigned char *)MSTPCRL; | 152 | volatile unsigned char *mstpcrl = (volatile unsigned char *)MSTPCRL; |
232 | int ch = (port->mapbase - SMR0) >> 3; | 153 | int ch = (port->mapbase - SMR0) >> 3; |
233 | unsigned char mask = 1 << (ch+1); | 154 | unsigned char mask = 1 << (ch+1); |
234 | 155 | ||
235 | if (ctrl == sci_disable) { | 156 | if (ctrl == sci_disable) |
236 | *mstpcrl |= mask; | 157 | *mstpcrl |= mask; |
237 | } else { | 158 | else |
238 | *mstpcrl &= ~mask; | 159 | *mstpcrl &= ~mask; |
239 | } | ||
240 | } | 160 | } |
241 | 161 | ||
242 | static inline void h8300_sci_enable(struct uart_port *port) | 162 | static inline void h8300_sci_enable(struct uart_port *port) |
@@ -251,7 +171,7 @@ static inline void h8300_sci_disable(struct uart_port *port) | |||
251 | #endif | 171 | #endif |
252 | 172 | ||
253 | #if defined(__H8300H__) || defined(__H8300S__) | 173 | #if defined(__H8300H__) || defined(__H8300S__) |
254 | static void sci_init_pins_sci(struct uart_port* port, unsigned int cflag) | 174 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) |
255 | { | 175 | { |
256 | int ch = (port->mapbase - SMR0) >> 3; | 176 | int ch = (port->mapbase - SMR0) >> 3; |
257 | 177 | ||
@@ -266,141 +186,99 @@ static void sci_init_pins_sci(struct uart_port* port, unsigned int cflag) | |||
266 | /* tx mark output*/ | 186 | /* tx mark output*/ |
267 | H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; | 187 | H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; |
268 | } | 188 | } |
269 | #else | 189 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) |
270 | #define sci_init_pins_sci NULL | 190 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
271 | #endif | ||
272 | |||
273 | #if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) | ||
274 | static void sci_init_pins_irda(struct uart_port *port, unsigned int cflag) | ||
275 | { | 191 | { |
276 | unsigned int fcr_val = 0; | 192 | if (port->mapbase == 0xA4400000) { |
277 | 193 | __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); | |
278 | if (cflag & CRTSCTS) | 194 | __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); |
279 | fcr_val |= SCFCR_MCE; | 195 | } else if (port->mapbase == 0xA4410000) |
280 | 196 | __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); | |
281 | sci_out(port, SCFCR, fcr_val); | ||
282 | } | ||
283 | #else | ||
284 | #define sci_init_pins_irda NULL | ||
285 | #endif | ||
286 | |||
287 | #if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | ||
288 | static void sci_init_pins_scif(struct uart_port* port, unsigned int cflag) | ||
289 | { | ||
290 | unsigned int fcr_val = 0; | ||
291 | |||
292 | set_sh771x_scif_pfc(port); | ||
293 | if (cflag & CRTSCTS) { | ||
294 | fcr_val |= SCFCR_MCE; | ||
295 | } | ||
296 | sci_out(port, SCFCR, fcr_val); | ||
297 | } | 197 | } |
298 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) | 198 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) |
299 | static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) | 199 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
300 | { | 200 | { |
301 | unsigned int fcr_val = 0; | ||
302 | unsigned short data; | 201 | unsigned short data; |
303 | 202 | ||
304 | if (cflag & CRTSCTS) { | 203 | if (cflag & CRTSCTS) { |
305 | /* enable RTS/CTS */ | 204 | /* enable RTS/CTS */ |
306 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ | 205 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ |
307 | /* Clear PTCR bit 9-2; enable all scif pins but sck */ | 206 | /* Clear PTCR bit 9-2; enable all scif pins but sck */ |
308 | data = ctrl_inw(PORT_PTCR); | 207 | data = __raw_readw(PORT_PTCR); |
309 | ctrl_outw((data & 0xfc03), PORT_PTCR); | 208 | __raw_writew((data & 0xfc03), PORT_PTCR); |
310 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ | 209 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ |
311 | /* Clear PVCR bit 9-2 */ | 210 | /* Clear PVCR bit 9-2 */ |
312 | data = ctrl_inw(PORT_PVCR); | 211 | data = __raw_readw(PORT_PVCR); |
313 | ctrl_outw((data & 0xfc03), PORT_PVCR); | 212 | __raw_writew((data & 0xfc03), PORT_PVCR); |
314 | } | 213 | } |
315 | fcr_val |= SCFCR_MCE; | ||
316 | } else { | 214 | } else { |
317 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ | 215 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ |
318 | /* Clear PTCR bit 5-2; enable only tx and rx */ | 216 | /* Clear PTCR bit 5-2; enable only tx and rx */ |
319 | data = ctrl_inw(PORT_PTCR); | 217 | data = __raw_readw(PORT_PTCR); |
320 | ctrl_outw((data & 0xffc3), PORT_PTCR); | 218 | __raw_writew((data & 0xffc3), PORT_PTCR); |
321 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ | 219 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ |
322 | /* Clear PVCR bit 5-2 */ | 220 | /* Clear PVCR bit 5-2 */ |
323 | data = ctrl_inw(PORT_PVCR); | 221 | data = __raw_readw(PORT_PVCR); |
324 | ctrl_outw((data & 0xffc3), PORT_PVCR); | 222 | __raw_writew((data & 0xffc3), PORT_PVCR); |
325 | } | 223 | } |
326 | } | 224 | } |
327 | sci_out(port, SCFCR, fcr_val); | ||
328 | } | 225 | } |
329 | #elif defined(CONFIG_CPU_SH3) | 226 | #elif defined(CONFIG_CPU_SH3) |
330 | /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ | 227 | /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ |
331 | static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) | 228 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
332 | { | 229 | { |
333 | unsigned int fcr_val = 0; | ||
334 | unsigned short data; | 230 | unsigned short data; |
335 | 231 | ||
336 | /* We need to set SCPCR to enable RTS/CTS */ | 232 | /* We need to set SCPCR to enable RTS/CTS */ |
337 | data = ctrl_inw(SCPCR); | 233 | data = __raw_readw(SCPCR); |
338 | /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ | 234 | /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ |
339 | ctrl_outw(data & 0x0fcf, SCPCR); | 235 | __raw_writew(data & 0x0fcf, SCPCR); |
340 | 236 | ||
341 | if (cflag & CRTSCTS) | 237 | if (!(cflag & CRTSCTS)) { |
342 | fcr_val |= SCFCR_MCE; | ||
343 | else { | ||
344 | /* We need to set SCPCR to enable RTS/CTS */ | 238 | /* We need to set SCPCR to enable RTS/CTS */ |
345 | data = ctrl_inw(SCPCR); | 239 | data = __raw_readw(SCPCR); |
346 | /* Clear out SCP7MD1,0, SCP4MD1,0, | 240 | /* Clear out SCP7MD1,0, SCP4MD1,0, |
347 | Set SCP6MD1,0 = {01} (output) */ | 241 | Set SCP6MD1,0 = {01} (output) */ |
348 | ctrl_outw((data & 0x0fcf) | 0x1000, SCPCR); | 242 | __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); |
349 | 243 | ||
350 | data = ctrl_inb(SCPDR); | 244 | data = ctrl_inb(SCPDR); |
351 | /* Set /RTS2 (bit6) = 0 */ | 245 | /* Set /RTS2 (bit6) = 0 */ |
352 | ctrl_outb(data & 0xbf, SCPDR); | 246 | ctrl_outb(data & 0xbf, SCPDR); |
353 | } | 247 | } |
354 | |||
355 | sci_out(port, SCFCR, fcr_val); | ||
356 | } | 248 | } |
357 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) | 249 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) |
358 | static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) | 250 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
359 | { | 251 | { |
360 | unsigned int fcr_val = 0; | ||
361 | unsigned short data; | 252 | unsigned short data; |
362 | 253 | ||
363 | if (port->mapbase == 0xffe00000) { | 254 | if (port->mapbase == 0xffe00000) { |
364 | data = ctrl_inw(PSCR); | 255 | data = __raw_readw(PSCR); |
365 | data &= ~0x03cf; | 256 | data &= ~0x03cf; |
366 | if (cflag & CRTSCTS) | 257 | if (!(cflag & CRTSCTS)) |
367 | fcr_val |= SCFCR_MCE; | ||
368 | else | ||
369 | data |= 0x0340; | 258 | data |= 0x0340; |
370 | 259 | ||
371 | ctrl_outw(data, PSCR); | 260 | __raw_writew(data, PSCR); |
372 | } | 261 | } |
373 | /* SCIF1 and SCIF2 should be setup by board code */ | ||
374 | |||
375 | sci_out(port, SCFCR, fcr_val); | ||
376 | } | ||
377 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) | ||
378 | static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) | ||
379 | { | ||
380 | /* Nothing to do here.. */ | ||
381 | sci_out(port, SCFCR, 0); | ||
382 | } | 262 | } |
383 | #else | ||
384 | /* For SH7750 */ | ||
385 | static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) | ||
386 | { | ||
387 | unsigned int fcr_val = 0; | ||
388 | |||
389 | if (cflag & CRTSCTS) { | ||
390 | fcr_val |= SCFCR_MCE; | ||
391 | } else { | ||
392 | #if defined(CONFIG_CPU_SUBTYPE_SH7343) || defined(CONFIG_CPU_SUBTYPE_SH7366) | ||
393 | /* Nothing */ | ||
394 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | 263 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ |
395 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 264 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
396 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 265 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
397 | defined(CONFIG_CPU_SUBTYPE_SHX3) | 266 | defined(CONFIG_CPU_SUBTYPE_SHX3) |
398 | ctrl_outw(0x0080, SCSPTR0); /* Set RTS = 1 */ | 267 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
268 | { | ||
269 | if (!(cflag & CRTSCTS)) | ||
270 | __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ | ||
271 | } | ||
272 | #elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) | ||
273 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
274 | { | ||
275 | if (!(cflag & CRTSCTS)) | ||
276 | __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ | ||
277 | } | ||
399 | #else | 278 | #else |
400 | ctrl_outw(0x0080, SCSPTR2); /* Set RTS = 1 */ | 279 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
401 | #endif | 280 | { |
402 | } | 281 | /* Nothing to do */ |
403 | sci_out(port, SCFCR, fcr_val); | ||
404 | } | 282 | } |
405 | #endif | 283 | #endif |
406 | 284 | ||
@@ -419,18 +297,26 @@ static inline int scif_rxroom(struct uart_port *port) | |||
419 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | 297 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) |
420 | static inline int scif_txroom(struct uart_port *port) | 298 | static inline int scif_txroom(struct uart_port *port) |
421 | { | 299 | { |
422 | if((port->mapbase == 0xffe00000) || (port->mapbase == 0xffe08000)) /* SCIF0/1*/ | 300 | if ((port->mapbase == 0xffe00000) || |
301 | (port->mapbase == 0xffe08000)) { | ||
302 | /* SCIF0/1*/ | ||
423 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); | 303 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); |
424 | else /* SCIF2 */ | 304 | } else { |
305 | /* SCIF2 */ | ||
425 | return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); | 306 | return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); |
307 | } | ||
426 | } | 308 | } |
427 | 309 | ||
428 | static inline int scif_rxroom(struct uart_port *port) | 310 | static inline int scif_rxroom(struct uart_port *port) |
429 | { | 311 | { |
430 | if((port->mapbase == 0xffe00000) || (port->mapbase == 0xffe08000)) /* SCIF0/1*/ | 312 | if ((port->mapbase == 0xffe00000) || |
313 | (port->mapbase == 0xffe08000)) { | ||
314 | /* SCIF0/1*/ | ||
431 | return sci_in(port, SCRFDR) & 0xff; | 315 | return sci_in(port, SCRFDR) & 0xff; |
432 | else /* SCIF2 */ | 316 | } else { |
317 | /* SCIF2 */ | ||
433 | return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; | 318 | return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; |
319 | } | ||
434 | } | 320 | } |
435 | #else | 321 | #else |
436 | static inline int scif_txroom(struct uart_port *port) | 322 | static inline int scif_txroom(struct uart_port *port) |
@@ -446,12 +332,12 @@ static inline int scif_rxroom(struct uart_port *port) | |||
446 | 332 | ||
447 | static inline int sci_txroom(struct uart_port *port) | 333 | static inline int sci_txroom(struct uart_port *port) |
448 | { | 334 | { |
449 | return ((sci_in(port, SCxSR) & SCI_TDRE) != 0); | 335 | return (sci_in(port, SCxSR) & SCI_TDRE) != 0; |
450 | } | 336 | } |
451 | 337 | ||
452 | static inline int sci_rxroom(struct uart_port *port) | 338 | static inline int sci_rxroom(struct uart_port *port) |
453 | { | 339 | { |
454 | return ((sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0); | 340 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; |
455 | } | 341 | } |
456 | 342 | ||
457 | /* ********************************************************************** * | 343 | /* ********************************************************************** * |
@@ -469,11 +355,10 @@ static void sci_transmit_chars(struct uart_port *port) | |||
469 | status = sci_in(port, SCxSR); | 355 | status = sci_in(port, SCxSR); |
470 | if (!(status & SCxSR_TDxE(port))) { | 356 | if (!(status & SCxSR_TDxE(port))) { |
471 | ctrl = sci_in(port, SCSCR); | 357 | ctrl = sci_in(port, SCSCR); |
472 | if (uart_circ_empty(xmit)) { | 358 | if (uart_circ_empty(xmit)) |
473 | ctrl &= ~SCI_CTRL_FLAGS_TIE; | 359 | ctrl &= ~SCI_CTRL_FLAGS_TIE; |
474 | } else { | 360 | else |
475 | ctrl |= SCI_CTRL_FLAGS_TIE; | 361 | ctrl |= SCI_CTRL_FLAGS_TIE; |
476 | } | ||
477 | sci_out(port, SCSCR, ctrl); | 362 | sci_out(port, SCSCR, ctrl); |
478 | return; | 363 | return; |
479 | } | 364 | } |
@@ -521,11 +406,11 @@ static void sci_transmit_chars(struct uart_port *port) | |||
521 | } | 406 | } |
522 | 407 | ||
523 | /* On SH3, SCIF may read end-of-break as a space->mark char */ | 408 | /* On SH3, SCIF may read end-of-break as a space->mark char */ |
524 | #define STEPFN(c) ({int __c=(c); (((__c-1)|(__c)) == -1); }) | 409 | #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) |
525 | 410 | ||
526 | static inline void sci_receive_chars(struct uart_port *port) | 411 | static inline void sci_receive_chars(struct uart_port *port) |
527 | { | 412 | { |
528 | struct sci_port *sci_port = (struct sci_port *)port; | 413 | struct sci_port *sci_port = to_sci_port(port); |
529 | struct tty_struct *tty = port->info->port.tty; | 414 | struct tty_struct *tty = port->info->port.tty; |
530 | int i, count, copied = 0; | 415 | int i, count, copied = 0; |
531 | unsigned short status; | 416 | unsigned short status; |
@@ -550,13 +435,13 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
550 | 435 | ||
551 | if (port->type == PORT_SCI) { | 436 | if (port->type == PORT_SCI) { |
552 | char c = sci_in(port, SCxRDR); | 437 | char c = sci_in(port, SCxRDR); |
553 | if (uart_handle_sysrq_char(port, c) || sci_port->break_flag) | 438 | if (uart_handle_sysrq_char(port, c) || |
439 | sci_port->break_flag) | ||
554 | count = 0; | 440 | count = 0; |
555 | else { | 441 | else |
556 | tty_insert_flip_char(tty, c, TTY_NORMAL); | 442 | tty_insert_flip_char(tty, c, TTY_NORMAL); |
557 | } | ||
558 | } else { | 443 | } else { |
559 | for (i=0; i<count; i++) { | 444 | for (i = 0; i < count; i++) { |
560 | char c = sci_in(port, SCxRDR); | 445 | char c = sci_in(port, SCxRDR); |
561 | status = sci_in(port, SCxSR); | 446 | status = sci_in(port, SCxSR); |
562 | #if defined(CONFIG_CPU_SH3) | 447 | #if defined(CONFIG_CPU_SH3) |
@@ -569,7 +454,7 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
569 | } | 454 | } |
570 | 455 | ||
571 | /* Nonzero => end-of-break */ | 456 | /* Nonzero => end-of-break */ |
572 | pr_debug("scif: debounce<%02x>\n", c); | 457 | dev_dbg(port->dev, "debounce<%02x>\n", c); |
573 | sci_port->break_flag = 0; | 458 | sci_port->break_flag = 0; |
574 | 459 | ||
575 | if (STEPFN(c)) { | 460 | if (STEPFN(c)) { |
@@ -586,12 +471,13 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
586 | /* Store data and status */ | 471 | /* Store data and status */ |
587 | if (status&SCxSR_FER(port)) { | 472 | if (status&SCxSR_FER(port)) { |
588 | flag = TTY_FRAME; | 473 | flag = TTY_FRAME; |
589 | pr_debug("sci: frame error\n"); | 474 | dev_notice(port->dev, "frame error\n"); |
590 | } else if (status&SCxSR_PER(port)) { | 475 | } else if (status&SCxSR_PER(port)) { |
591 | flag = TTY_PARITY; | 476 | flag = TTY_PARITY; |
592 | pr_debug("sci: parity error\n"); | 477 | dev_notice(port->dev, "parity error\n"); |
593 | } else | 478 | } else |
594 | flag = TTY_NORMAL; | 479 | flag = TTY_NORMAL; |
480 | |||
595 | tty_insert_flip_char(tty, c, flag); | 481 | tty_insert_flip_char(tty, c, flag); |
596 | } | 482 | } |
597 | } | 483 | } |
@@ -651,13 +537,14 @@ static inline int sci_handle_errors(struct uart_port *port) | |||
651 | /* overrun error */ | 537 | /* overrun error */ |
652 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) | 538 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) |
653 | copied++; | 539 | copied++; |
654 | pr_debug("sci: overrun error\n"); | 540 | |
541 | dev_notice(port->dev, "overrun error"); | ||
655 | } | 542 | } |
656 | 543 | ||
657 | if (status & SCxSR_FER(port)) { | 544 | if (status & SCxSR_FER(port)) { |
658 | if (sci_rxd_in(port) == 0) { | 545 | if (sci_rxd_in(port) == 0) { |
659 | /* Notify of BREAK */ | 546 | /* Notify of BREAK */ |
660 | struct sci_port *sci_port = (struct sci_port *)port; | 547 | struct sci_port *sci_port = to_sci_port(port); |
661 | 548 | ||
662 | if (!sci_port->break_flag) { | 549 | if (!sci_port->break_flag) { |
663 | sci_port->break_flag = 1; | 550 | sci_port->break_flag = 1; |
@@ -666,15 +553,19 @@ static inline int sci_handle_errors(struct uart_port *port) | |||
666 | /* Do sysrq handling. */ | 553 | /* Do sysrq handling. */ |
667 | if (uart_handle_break(port)) | 554 | if (uart_handle_break(port)) |
668 | return 0; | 555 | return 0; |
669 | pr_debug("sci: BREAK detected\n"); | 556 | |
557 | dev_dbg(port->dev, "BREAK detected\n"); | ||
558 | |||
670 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) | 559 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) |
671 | copied++; | 560 | copied++; |
672 | } | 561 | } |
562 | |||
673 | } else { | 563 | } else { |
674 | /* frame error */ | 564 | /* frame error */ |
675 | if (tty_insert_flip_char(tty, 0, TTY_FRAME)) | 565 | if (tty_insert_flip_char(tty, 0, TTY_FRAME)) |
676 | copied++; | 566 | copied++; |
677 | pr_debug("sci: frame error\n"); | 567 | |
568 | dev_notice(port->dev, "frame error\n"); | ||
678 | } | 569 | } |
679 | } | 570 | } |
680 | 571 | ||
@@ -682,7 +573,8 @@ static inline int sci_handle_errors(struct uart_port *port) | |||
682 | /* parity error */ | 573 | /* parity error */ |
683 | if (tty_insert_flip_char(tty, 0, TTY_PARITY)) | 574 | if (tty_insert_flip_char(tty, 0, TTY_PARITY)) |
684 | copied++; | 575 | copied++; |
685 | pr_debug("sci: parity error\n"); | 576 | |
577 | dev_notice(port->dev, "parity error"); | ||
686 | } | 578 | } |
687 | 579 | ||
688 | if (copied) | 580 | if (copied) |
@@ -691,6 +583,27 @@ static inline int sci_handle_errors(struct uart_port *port) | |||
691 | return copied; | 583 | return copied; |
692 | } | 584 | } |
693 | 585 | ||
586 | static inline int sci_handle_fifo_overrun(struct uart_port *port) | ||
587 | { | ||
588 | struct tty_struct *tty = port->info->port.tty; | ||
589 | int copied = 0; | ||
590 | |||
591 | if (port->type != PORT_SCIF) | ||
592 | return 0; | ||
593 | |||
594 | if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { | ||
595 | sci_out(port, SCLSR, 0); | ||
596 | |||
597 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | ||
598 | tty_flip_buffer_push(tty); | ||
599 | |||
600 | dev_notice(port->dev, "overrun error\n"); | ||
601 | copied++; | ||
602 | } | ||
603 | |||
604 | return copied; | ||
605 | } | ||
606 | |||
694 | static inline int sci_handle_breaks(struct uart_port *port) | 607 | static inline int sci_handle_breaks(struct uart_port *port) |
695 | { | 608 | { |
696 | int copied = 0; | 609 | int copied = 0; |
@@ -709,23 +622,15 @@ static inline int sci_handle_breaks(struct uart_port *port) | |||
709 | /* Notify of BREAK */ | 622 | /* Notify of BREAK */ |
710 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) | 623 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) |
711 | copied++; | 624 | copied++; |
712 | pr_debug("sci: BREAK detected\n"); | ||
713 | } | ||
714 | 625 | ||
715 | #if defined(SCIF_ORER) | 626 | dev_dbg(port->dev, "BREAK detected\n"); |
716 | /* XXX: Handle SCIF overrun error */ | ||
717 | if (port->type != PORT_SCI && (sci_in(port, SCLSR) & SCIF_ORER) != 0) { | ||
718 | sci_out(port, SCLSR, 0); | ||
719 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) { | ||
720 | copied++; | ||
721 | pr_debug("sci: overrun error\n"); | ||
722 | } | ||
723 | } | 627 | } |
724 | #endif | ||
725 | 628 | ||
726 | if (copied) | 629 | if (copied) |
727 | tty_flip_buffer_push(tty); | 630 | tty_flip_buffer_push(tty); |
728 | 631 | ||
632 | copied += sci_handle_fifo_overrun(port); | ||
633 | |||
729 | return copied; | 634 | return copied; |
730 | } | 635 | } |
731 | 636 | ||
@@ -763,16 +668,7 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr) | |||
763 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); | 668 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); |
764 | } | 669 | } |
765 | } else { | 670 | } else { |
766 | #if defined(SCIF_ORER) | 671 | sci_handle_fifo_overrun(port); |
767 | if((sci_in(port, SCLSR) & SCIF_ORER) != 0) { | ||
768 | struct tty_struct *tty = port->info->port.tty; | ||
769 | |||
770 | sci_out(port, SCLSR, 0); | ||
771 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | ||
772 | tty_flip_buffer_push(tty); | ||
773 | pr_debug("scif: overrun error\n"); | ||
774 | } | ||
775 | #endif | ||
776 | sci_rx_interrupt(irq, ptr); | 672 | sci_rx_interrupt(irq, ptr); |
777 | } | 673 | } |
778 | 674 | ||
@@ -801,8 +697,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
801 | struct uart_port *port = ptr; | 697 | struct uart_port *port = ptr; |
802 | irqreturn_t ret = IRQ_NONE; | 698 | irqreturn_t ret = IRQ_NONE; |
803 | 699 | ||
804 | ssr_status = sci_in(port,SCxSR); | 700 | ssr_status = sci_in(port, SCxSR); |
805 | scr_status = sci_in(port,SCSCR); | 701 | scr_status = sci_in(port, SCSCR); |
806 | 702 | ||
807 | /* Tx Interrupt */ | 703 | /* Tx Interrupt */ |
808 | if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE)) | 704 | if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE)) |
@@ -820,7 +716,7 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
820 | return ret; | 716 | return ret; |
821 | } | 717 | } |
822 | 718 | ||
823 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK) | 719 | #ifdef CONFIG_HAVE_CLK |
824 | /* | 720 | /* |
825 | * Here we define a transistion notifier so that we can update all of our | 721 | * Here we define a transistion notifier so that we can update all of our |
826 | * ports' baud rate when the peripheral clock changes. | 722 | * ports' baud rate when the peripheral clock changes. |
@@ -828,41 +724,20 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
828 | static int sci_notifier(struct notifier_block *self, | 724 | static int sci_notifier(struct notifier_block *self, |
829 | unsigned long phase, void *p) | 725 | unsigned long phase, void *p) |
830 | { | 726 | { |
831 | struct cpufreq_freqs *freqs = p; | ||
832 | int i; | 727 | int i; |
833 | 728 | ||
834 | if ((phase == CPUFREQ_POSTCHANGE) || | 729 | if ((phase == CPUFREQ_POSTCHANGE) || |
835 | (phase == CPUFREQ_RESUMECHANGE)){ | 730 | (phase == CPUFREQ_RESUMECHANGE)) |
836 | for (i = 0; i < SCI_NPORTS; i++) { | 731 | for (i = 0; i < SCI_NPORTS; i++) { |
837 | struct uart_port *port = &sci_ports[i].port; | 732 | struct sci_port *s = &sci_ports[i]; |
838 | struct clk *clk; | 733 | s->port.uartclk = clk_get_rate(s->clk); |
839 | |||
840 | /* | ||
841 | * Update the uartclk per-port if frequency has | ||
842 | * changed, since it will no longer necessarily be | ||
843 | * consistent with the old frequency. | ||
844 | * | ||
845 | * Really we want to be able to do something like | ||
846 | * uart_change_speed() or something along those lines | ||
847 | * here to implicitly reset the per-port baud rate.. | ||
848 | * | ||
849 | * Clean this up later.. | ||
850 | */ | ||
851 | clk = clk_get(NULL, "module_clk"); | ||
852 | port->uartclk = clk_get_rate(clk); | ||
853 | clk_put(clk); | ||
854 | } | 734 | } |
855 | 735 | ||
856 | printk(KERN_INFO "%s: got a postchange notification " | ||
857 | "for cpu %d (old %d, new %d)\n", | ||
858 | __func__, freqs->cpu, freqs->old, freqs->new); | ||
859 | } | ||
860 | |||
861 | return NOTIFY_OK; | 736 | return NOTIFY_OK; |
862 | } | 737 | } |
863 | 738 | ||
864 | static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 }; | 739 | static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 }; |
865 | #endif /* CONFIG_CPU_FREQ && CONFIG_HAVE_CLK */ | 740 | #endif |
866 | 741 | ||
867 | static int sci_request_irq(struct sci_port *port) | 742 | static int sci_request_irq(struct sci_port *port) |
868 | { | 743 | { |
@@ -875,23 +750,22 @@ static int sci_request_irq(struct sci_port *port) | |||
875 | "SCI Transmit Data Empty", "SCI Break" }; | 750 | "SCI Transmit Data Empty", "SCI Break" }; |
876 | 751 | ||
877 | if (port->irqs[0] == port->irqs[1]) { | 752 | if (port->irqs[0] == port->irqs[1]) { |
878 | if (!port->irqs[0]) { | 753 | if (unlikely(!port->irqs[0])) |
879 | printk(KERN_ERR "sci: Cannot allocate irq.(IRQ=0)\n"); | ||
880 | return -ENODEV; | 754 | return -ENODEV; |
881 | } | ||
882 | 755 | ||
883 | if (request_irq(port->irqs[0], sci_mpxed_interrupt, | 756 | if (request_irq(port->irqs[0], sci_mpxed_interrupt, |
884 | IRQF_DISABLED, "sci", port)) { | 757 | IRQF_DISABLED, "sci", port)) { |
885 | printk(KERN_ERR "sci: Cannot allocate irq.\n"); | 758 | dev_err(port->port.dev, "Can't allocate IRQ\n"); |
886 | return -ENODEV; | 759 | return -ENODEV; |
887 | } | 760 | } |
888 | } else { | 761 | } else { |
889 | for (i = 0; i < ARRAY_SIZE(handlers); i++) { | 762 | for (i = 0; i < ARRAY_SIZE(handlers); i++) { |
890 | if (!port->irqs[i]) | 763 | if (unlikely(!port->irqs[i])) |
891 | continue; | 764 | continue; |
765 | |||
892 | if (request_irq(port->irqs[i], handlers[i], | 766 | if (request_irq(port->irqs[i], handlers[i], |
893 | IRQF_DISABLED, desc[i], port)) { | 767 | IRQF_DISABLED, desc[i], port)) { |
894 | printk(KERN_ERR "sci: Cannot allocate irq.\n"); | 768 | dev_err(port->port.dev, "Can't allocate IRQ\n"); |
895 | return -ENODEV; | 769 | return -ENODEV; |
896 | } | 770 | } |
897 | } | 771 | } |
@@ -904,12 +778,9 @@ static void sci_free_irq(struct sci_port *port) | |||
904 | { | 778 | { |
905 | int i; | 779 | int i; |
906 | 780 | ||
907 | if (port->irqs[0] == port->irqs[1]) { | 781 | if (port->irqs[0] == port->irqs[1]) |
908 | if (!port->irqs[0]) | 782 | free_irq(port->irqs[0], port); |
909 | printk("sci: sci_free_irq error\n"); | 783 | else { |
910 | else | ||
911 | free_irq(port->irqs[0], port); | ||
912 | } else { | ||
913 | for (i = 0; i < ARRAY_SIZE(port->irqs); i++) { | 784 | for (i = 0; i < ARRAY_SIZE(port->irqs); i++) { |
914 | if (!port->irqs[i]) | 785 | if (!port->irqs[i]) |
915 | continue; | 786 | continue; |
@@ -1028,7 +899,6 @@ static void sci_shutdown(struct uart_port *port) | |||
1028 | static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | 899 | static void sci_set_termios(struct uart_port *port, struct ktermios *termios, |
1029 | struct ktermios *old) | 900 | struct ktermios *old) |
1030 | { | 901 | { |
1031 | struct sci_port *s = &sci_ports[port->line]; | ||
1032 | unsigned int status, baud, smr_val; | 902 | unsigned int status, baud, smr_val; |
1033 | int t = -1; | 903 | int t = -1; |
1034 | 904 | ||
@@ -1060,32 +930,36 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1060 | sci_out(port, SCSMR, smr_val); | 930 | sci_out(port, SCSMR, smr_val); |
1061 | 931 | ||
1062 | if (t > 0) { | 932 | if (t > 0) { |
1063 | if(t >= 256) { | 933 | if (t >= 256) { |
1064 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); | 934 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); |
1065 | t >>= 2; | 935 | t >>= 2; |
1066 | } else { | 936 | } else |
1067 | sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3); | 937 | sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3); |
1068 | } | 938 | |
1069 | sci_out(port, SCBRR, t); | 939 | sci_out(port, SCBRR, t); |
1070 | udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ | 940 | udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ |
1071 | } | 941 | } |
1072 | 942 | ||
1073 | if (likely(s->init_pins)) | 943 | sci_init_pins(port, termios->c_cflag); |
1074 | s->init_pins(port, termios->c_cflag); | 944 | sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0); |
1075 | 945 | ||
1076 | sci_out(port, SCSCR, SCSCR_INIT(port)); | 946 | sci_out(port, SCSCR, SCSCR_INIT(port)); |
1077 | 947 | ||
1078 | if ((termios->c_cflag & CREAD) != 0) | 948 | if ((termios->c_cflag & CREAD) != 0) |
1079 | sci_start_rx(port,0); | 949 | sci_start_rx(port, 0); |
1080 | } | 950 | } |
1081 | 951 | ||
1082 | static const char *sci_type(struct uart_port *port) | 952 | static const char *sci_type(struct uart_port *port) |
1083 | { | 953 | { |
1084 | switch (port->type) { | 954 | switch (port->type) { |
1085 | case PORT_SCI: return "sci"; | 955 | case PORT_IRDA: |
1086 | case PORT_SCIF: return "scif"; | 956 | return "irda"; |
1087 | case PORT_IRDA: return "irda"; | 957 | case PORT_SCI: |
1088 | case PORT_SCIFA: return "scifa"; | 958 | return "sci"; |
959 | case PORT_SCIF: | ||
960 | return "scif"; | ||
961 | case PORT_SCIFA: | ||
962 | return "scifa"; | ||
1089 | } | 963 | } |
1090 | 964 | ||
1091 | return NULL; | 965 | return NULL; |
@@ -1108,19 +982,6 @@ static void sci_config_port(struct uart_port *port, int flags) | |||
1108 | 982 | ||
1109 | port->type = s->type; | 983 | port->type = s->type; |
1110 | 984 | ||
1111 | switch (port->type) { | ||
1112 | case PORT_SCI: | ||
1113 | s->init_pins = sci_init_pins_sci; | ||
1114 | break; | ||
1115 | case PORT_SCIF: | ||
1116 | case PORT_SCIFA: | ||
1117 | s->init_pins = sci_init_pins_scif; | ||
1118 | break; | ||
1119 | case PORT_IRDA: | ||
1120 | s->init_pins = sci_init_pins_irda; | ||
1121 | break; | ||
1122 | } | ||
1123 | |||
1124 | if (port->flags & UPF_IOREMAP && !port->membase) { | 985 | if (port->flags & UPF_IOREMAP && !port->membase) { |
1125 | #if defined(CONFIG_SUPERH64) | 986 | #if defined(CONFIG_SUPERH64) |
1126 | port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF"); | 987 | port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF"); |
@@ -1129,7 +990,7 @@ static void sci_config_port(struct uart_port *port, int flags) | |||
1129 | port->membase = ioremap_nocache(port->mapbase, 0x40); | 990 | port->membase = ioremap_nocache(port->mapbase, 0x40); |
1130 | #endif | 991 | #endif |
1131 | 992 | ||
1132 | printk(KERN_ERR "sci: can't remap port#%d\n", port->line); | 993 | dev_err(port->dev, "can't remap port#%d\n", port->line); |
1133 | } | 994 | } |
1134 | } | 995 | } |
1135 | 996 | ||
@@ -1163,6 +1024,10 @@ static struct uart_ops sci_uart_ops = { | |||
1163 | .request_port = sci_request_port, | 1024 | .request_port = sci_request_port, |
1164 | .config_port = sci_config_port, | 1025 | .config_port = sci_config_port, |
1165 | .verify_port = sci_verify_port, | 1026 | .verify_port = sci_verify_port, |
1027 | #ifdef CONFIG_CONSOLE_POLL | ||
1028 | .poll_get_char = sci_poll_get_char, | ||
1029 | .poll_put_char = sci_poll_put_char, | ||
1030 | #endif | ||
1166 | }; | 1031 | }; |
1167 | 1032 | ||
1168 | static void __init sci_init_ports(void) | 1033 | static void __init sci_init_ports(void) |
@@ -1229,7 +1094,15 @@ int __init early_sci_setup(struct uart_port *port) | |||
1229 | static void serial_console_write(struct console *co, const char *s, | 1094 | static void serial_console_write(struct console *co, const char *s, |
1230 | unsigned count) | 1095 | unsigned count) |
1231 | { | 1096 | { |
1232 | put_string(serial_console_port, s, count); | 1097 | struct uart_port *port = &serial_console_port->port; |
1098 | int i; | ||
1099 | |||
1100 | for (i = 0; i < count; i++) { | ||
1101 | if (*s == 10) | ||
1102 | sci_poll_put_char(port, '\r'); | ||
1103 | |||
1104 | sci_poll_put_char(port, *s++); | ||
1105 | } | ||
1233 | } | 1106 | } |
1234 | 1107 | ||
1235 | static int __init serial_console_setup(struct console *co, char *options) | 1108 | static int __init serial_console_setup(struct console *co, char *options) |
@@ -1307,89 +1180,8 @@ static int __init sci_console_init(void) | |||
1307 | console_initcall(sci_console_init); | 1180 | console_initcall(sci_console_init); |
1308 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ | 1181 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ |
1309 | 1182 | ||
1310 | #ifdef CONFIG_SH_KGDB_CONSOLE | 1183 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) |
1311 | /* | 1184 | #define SCI_CONSOLE (&serial_console) |
1312 | * FIXME: Most of this can go away.. at the moment, we rely on | ||
1313 | * arch/sh/kernel/setup.c to do the command line parsing for kgdb, though | ||
1314 | * most of that can easily be done here instead. | ||
1315 | * | ||
1316 | * For the time being, just accept the values that were parsed earlier.. | ||
1317 | */ | ||
1318 | static void __init kgdb_console_get_options(struct uart_port *port, int *baud, | ||
1319 | int *parity, int *bits) | ||
1320 | { | ||
1321 | *baud = kgdb_baud; | ||
1322 | *parity = tolower(kgdb_parity); | ||
1323 | *bits = kgdb_bits - '0'; | ||
1324 | } | ||
1325 | |||
1326 | /* | ||
1327 | * The naming here is somewhat misleading, since kgdb_console_setup() takes | ||
1328 | * care of the early-on initialization for kgdb, regardless of whether we | ||
1329 | * actually use kgdb as a console or not. | ||
1330 | * | ||
1331 | * On the plus side, this lets us kill off the old kgdb_sci_setup() nonsense. | ||
1332 | */ | ||
1333 | int __init kgdb_console_setup(struct console *co, char *options) | ||
1334 | { | ||
1335 | struct uart_port *port = &sci_ports[kgdb_portnum].port; | ||
1336 | int baud = 38400; | ||
1337 | int bits = 8; | ||
1338 | int parity = 'n'; | ||
1339 | int flow = 'n'; | ||
1340 | |||
1341 | if (co->index != kgdb_portnum) | ||
1342 | co->index = kgdb_portnum; | ||
1343 | |||
1344 | kgdb_sci_port = &sci_ports[co->index]; | ||
1345 | port = &kgdb_sci_port->port; | ||
1346 | |||
1347 | /* | ||
1348 | * Also need to check port->type, we don't actually have any | ||
1349 | * UPIO_PORT ports, but uart_report_port() handily misreports | ||
1350 | * it anyways if we don't have a port available by the time this is | ||
1351 | * called. | ||
1352 | */ | ||
1353 | if (!port->type) | ||
1354 | return -ENODEV; | ||
1355 | if (!port->membase || !port->mapbase) | ||
1356 | return -ENODEV; | ||
1357 | |||
1358 | if (options) | ||
1359 | uart_parse_options(options, &baud, &parity, &bits, &flow); | ||
1360 | else | ||
1361 | kgdb_console_get_options(port, &baud, &parity, &bits); | ||
1362 | |||
1363 | kgdb_getchar = kgdb_sci_getchar; | ||
1364 | kgdb_putchar = kgdb_sci_putchar; | ||
1365 | |||
1366 | return uart_set_options(port, co, baud, parity, bits, flow); | ||
1367 | } | ||
1368 | |||
1369 | static struct console kgdb_console = { | ||
1370 | .name = "ttySC", | ||
1371 | .device = uart_console_device, | ||
1372 | .write = kgdb_console_write, | ||
1373 | .setup = kgdb_console_setup, | ||
1374 | .flags = CON_PRINTBUFFER, | ||
1375 | .index = -1, | ||
1376 | .data = &sci_uart_driver, | ||
1377 | }; | ||
1378 | |||
1379 | /* Register the KGDB console so we get messages (d'oh!) */ | ||
1380 | static int __init kgdb_console_init(void) | ||
1381 | { | ||
1382 | sci_init_ports(); | ||
1383 | register_console(&kgdb_console); | ||
1384 | return 0; | ||
1385 | } | ||
1386 | console_initcall(kgdb_console_init); | ||
1387 | #endif /* CONFIG_SH_KGDB_CONSOLE */ | ||
1388 | |||
1389 | #if defined(CONFIG_SH_KGDB_CONSOLE) | ||
1390 | #define SCI_CONSOLE &kgdb_console | ||
1391 | #elif defined(CONFIG_SERIAL_SH_SCI_CONSOLE) | ||
1392 | #define SCI_CONSOLE &serial_console | ||
1393 | #else | 1185 | #else |
1394 | #define SCI_CONSOLE 0 | 1186 | #define SCI_CONSOLE 0 |
1395 | #endif | 1187 | #endif |
@@ -1463,15 +1255,8 @@ static int __devinit sci_probe(struct platform_device *dev) | |||
1463 | uart_add_one_port(&sci_uart_driver, &sciport->port); | 1255 | uart_add_one_port(&sci_uart_driver, &sciport->port); |
1464 | } | 1256 | } |
1465 | 1257 | ||
1466 | #if defined(CONFIG_SH_KGDB) && !defined(CONFIG_SH_KGDB_CONSOLE) | 1258 | #ifdef CONFIG_HAVE_CLK |
1467 | kgdb_sci_port = &sci_ports[kgdb_portnum]; | ||
1468 | kgdb_getchar = kgdb_sci_getchar; | ||
1469 | kgdb_putchar = kgdb_sci_putchar; | ||
1470 | #endif | ||
1471 | |||
1472 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK) | ||
1473 | cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); | 1259 | cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); |
1474 | dev_info(&dev->dev, "CPU frequency notifier registered\n"); | ||
1475 | #endif | 1260 | #endif |
1476 | 1261 | ||
1477 | #ifdef CONFIG_SH_STANDARD_BIOS | 1262 | #ifdef CONFIG_SH_STANDARD_BIOS |
@@ -1491,6 +1276,10 @@ static int __devexit sci_remove(struct platform_device *dev) | |||
1491 | { | 1276 | { |
1492 | int i; | 1277 | int i; |
1493 | 1278 | ||
1279 | #ifdef CONFIG_HAVE_CLK | ||
1280 | cpufreq_unregister_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); | ||
1281 | #endif | ||
1282 | |||
1494 | for (i = 0; i < SCI_NPORTS; i++) | 1283 | for (i = 0; i < SCI_NPORTS; i++) |
1495 | uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port); | 1284 | uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port); |
1496 | 1285 | ||
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 9f33b064172e..38c600c0dbbf 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h | |||
@@ -133,13 +133,20 @@ | |||
133 | # define SCSPTR5 0xffef0024 /* 16 bit SCIF */ | 133 | # define SCSPTR5 0xffef0024 /* 16 bit SCIF */ |
134 | # define SCIF_OPER 0x0001 /* Overrun error bit */ | 134 | # define SCIF_OPER 0x0001 /* Overrun error bit */ |
135 | # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | 135 | # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ |
136 | #elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \ | 136 | #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ |
137 | defined(CONFIG_CPU_SUBTYPE_SH7203) || \ | ||
137 | defined(CONFIG_CPU_SUBTYPE_SH7206) || \ | 138 | defined(CONFIG_CPU_SUBTYPE_SH7206) || \ |
138 | defined(CONFIG_CPU_SUBTYPE_SH7263) | 139 | defined(CONFIG_CPU_SUBTYPE_SH7263) |
139 | # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ | 140 | # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ |
140 | # define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ | 141 | # define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ |
141 | # define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ | 142 | # define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ |
142 | # define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ | 143 | # define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ |
144 | # if defined(CONFIG_CPU_SUBTYPE_SH7201) | ||
145 | # define SCSPTR4 0xfffeA020 /* 16 bit SCIF */ | ||
146 | # define SCSPTR5 0xfffeA820 /* 16 bit SCIF */ | ||
147 | # define SCSPTR6 0xfffeB020 /* 16 bit SCIF */ | ||
148 | # define SCSPTR7 0xfffeB820 /* 16 bit SCIF */ | ||
149 | # endif | ||
143 | # define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | 150 | # define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ |
144 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | 151 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) |
145 | # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ | 152 | # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ |
@@ -225,6 +232,10 @@ | |||
225 | # define SCIF_TXROOM_MAX 16 | 232 | # define SCIF_TXROOM_MAX 16 |
226 | #endif | 233 | #endif |
227 | 234 | ||
235 | #ifndef SCIF_ORER | ||
236 | #define SCIF_ORER 0x0000 | ||
237 | #endif | ||
238 | |||
228 | #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) | 239 | #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) |
229 | #define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS) | 240 | #define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS) |
230 | #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) | 241 | #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) |
@@ -232,12 +243,7 @@ | |||
232 | #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) | 243 | #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) |
233 | #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) | 244 | #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) |
234 | #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) | 245 | #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) |
235 | 246 | #define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) | |
236 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) | ||
237 | # define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) | ||
238 | #else | ||
239 | # define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : 0x0000) | ||
240 | #endif | ||
241 | 247 | ||
242 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 248 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
243 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 249 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
@@ -501,18 +507,6 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
501 | { | 507 | { |
502 | return sci_in(port,SCxSR)&0x0010 ? 1 : 0; | 508 | return sci_in(port,SCxSR)&0x0010 ? 1 : 0; |
503 | } | 509 | } |
504 | static inline void set_sh771x_scif_pfc(struct uart_port *port) | ||
505 | { | ||
506 | if (port->mapbase == 0xA4400000){ | ||
507 | ctrl_outw(ctrl_inw(PACR)&0xffc0,PACR); | ||
508 | ctrl_outw(ctrl_inw(PBCR)&0x0fff,PBCR); | ||
509 | return; | ||
510 | } | ||
511 | if (port->mapbase == 0xA4410000){ | ||
512 | ctrl_outw(ctrl_inw(PBCR)&0xf003,PBCR); | ||
513 | return; | ||
514 | } | ||
515 | } | ||
516 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 510 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
517 | defined(CONFIG_CPU_SUBTYPE_SH7721) | 511 | defined(CONFIG_CPU_SUBTYPE_SH7721) |
518 | static inline int sci_rxd_in(struct uart_port *port) | 512 | static inline int sci_rxd_in(struct uart_port *port) |
@@ -664,7 +658,8 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
664 | return ctrl_inw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */ | 658 | return ctrl_inw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */ |
665 | return 1; | 659 | return 1; |
666 | } | 660 | } |
667 | #elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \ | 661 | #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ |
662 | defined(CONFIG_CPU_SUBTYPE_SH7203) || \ | ||
668 | defined(CONFIG_CPU_SUBTYPE_SH7206) || \ | 663 | defined(CONFIG_CPU_SUBTYPE_SH7206) || \ |
669 | defined(CONFIG_CPU_SUBTYPE_SH7263) | 664 | defined(CONFIG_CPU_SUBTYPE_SH7263) |
670 | static inline int sci_rxd_in(struct uart_port *port) | 665 | static inline int sci_rxd_in(struct uart_port *port) |
@@ -677,6 +672,16 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
677 | return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | 672 | return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ |
678 | if (port->mapbase == 0xfffe9800) | 673 | if (port->mapbase == 0xfffe9800) |
679 | return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | 674 | return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ |
675 | #if defined(CONFIG_CPU_SUBTYPE_SH7201) | ||
676 | if (port->mapbase == 0xfffeA000) | ||
677 | return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
678 | if (port->mapbase == 0xfffeA800) | ||
679 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
680 | if (port->mapbase == 0xfffeB000) | ||
681 | return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
682 | if (port->mapbase == 0xfffeB800) | ||
683 | return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
684 | #endif | ||
680 | return 1; | 685 | return 1; |
681 | } | 686 | } |
682 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | 687 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) |
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index d1812d32f47d..63f0de29aa14 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
@@ -827,7 +827,7 @@ static int __init maple_bus_init(void) | |||
827 | 827 | ||
828 | maple_queue_cache = | 828 | maple_queue_cache = |
829 | kmem_cache_create("maple_queue_cache", 0x400, 0, | 829 | kmem_cache_create("maple_queue_cache", 0x400, 0, |
830 | SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL); | 830 | SLAB_HWCACHE_ALIGN, NULL); |
831 | 831 | ||
832 | if (!maple_queue_cache) | 832 | if (!maple_queue_cache) |
833 | goto cleanup_bothirqs; | 833 | goto cleanup_bothirqs; |
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index c95b286a1239..5d457c96bd7e 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
@@ -22,6 +22,8 @@ menuconfig STAGING | |||
22 | If in doubt, say N here. | 22 | If in doubt, say N here. |
23 | 23 | ||
24 | 24 | ||
25 | if STAGING | ||
26 | |||
25 | config STAGING_EXCLUDE_BUILD | 27 | config STAGING_EXCLUDE_BUILD |
26 | bool "Exclude Staging drivers from being built" if STAGING | 28 | bool "Exclude Staging drivers from being built" if STAGING |
27 | default y | 29 | default y |
@@ -62,3 +64,4 @@ source "drivers/staging/at76_usb/Kconfig" | |||
62 | source "drivers/staging/poch/Kconfig" | 64 | source "drivers/staging/poch/Kconfig" |
63 | 65 | ||
64 | endif # !STAGING_EXCLUDE_BUILD | 66 | endif # !STAGING_EXCLUDE_BUILD |
67 | endif # STAGING | ||
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 8e74657f106c..43a863c5cc43 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
@@ -51,6 +51,7 @@ static struct usb_device_id usbtmc_devices[] = { | |||
51 | { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, | 51 | { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, |
52 | { 0, } /* terminating entry */ | 52 | { 0, } /* terminating entry */ |
53 | }; | 53 | }; |
54 | MODULE_DEVICE_TABLE(usb, usbtmc_devices); | ||
54 | 55 | ||
55 | /* | 56 | /* |
56 | * This structure is the capabilities for the device | 57 | * This structure is the capabilities for the device |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 2bccefebff1b..aa79280df15d 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -574,6 +574,7 @@ static int usbdev_open(struct inode *inode, struct file *file) | |||
574 | { | 574 | { |
575 | struct usb_device *dev = NULL; | 575 | struct usb_device *dev = NULL; |
576 | struct dev_state *ps; | 576 | struct dev_state *ps; |
577 | const struct cred *cred = current_cred(); | ||
577 | int ret; | 578 | int ret; |
578 | 579 | ||
579 | lock_kernel(); | 580 | lock_kernel(); |
@@ -617,8 +618,8 @@ static int usbdev_open(struct inode *inode, struct file *file) | |||
617 | init_waitqueue_head(&ps->wait); | 618 | init_waitqueue_head(&ps->wait); |
618 | ps->discsignr = 0; | 619 | ps->discsignr = 0; |
619 | ps->disc_pid = get_pid(task_pid(current)); | 620 | ps->disc_pid = get_pid(task_pid(current)); |
620 | ps->disc_uid = current->uid; | 621 | ps->disc_uid = cred->uid; |
621 | ps->disc_euid = current->euid; | 622 | ps->disc_euid = cred->euid; |
622 | ps->disccontext = NULL; | 623 | ps->disccontext = NULL; |
623 | ps->ifclaimed = 0; | 624 | ps->ifclaimed = 0; |
624 | security_task_getsecid(current, &ps->secid); | 625 | security_task_getsecid(current, &ps->secid); |
@@ -967,6 +968,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, | |||
967 | struct usb_host_endpoint *ep; | 968 | struct usb_host_endpoint *ep; |
968 | struct async *as; | 969 | struct async *as; |
969 | struct usb_ctrlrequest *dr = NULL; | 970 | struct usb_ctrlrequest *dr = NULL; |
971 | const struct cred *cred = current_cred(); | ||
970 | unsigned int u, totlen, isofrmlen; | 972 | unsigned int u, totlen, isofrmlen; |
971 | int ret, ifnum = -1; | 973 | int ret, ifnum = -1; |
972 | int is_in; | 974 | int is_in; |
@@ -1174,8 +1176,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, | |||
1174 | as->signr = uurb->signr; | 1176 | as->signr = uurb->signr; |
1175 | as->ifnum = ifnum; | 1177 | as->ifnum = ifnum; |
1176 | as->pid = get_pid(task_pid(current)); | 1178 | as->pid = get_pid(task_pid(current)); |
1177 | as->uid = current->uid; | 1179 | as->uid = cred->uid; |
1178 | as->euid = current->euid; | 1180 | as->euid = cred->euid; |
1179 | security_task_getsecid(current, &as->secid); | 1181 | security_task_getsecid(current, &as->secid); |
1180 | if (!is_in) { | 1182 | if (!is_in) { |
1181 | if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, | 1183 | if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 3d7793d93031..8c081308b0e2 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -279,7 +279,9 @@ static int usb_unbind_interface(struct device *dev) | |||
279 | * altsetting means creating new endpoint device entries). | 279 | * altsetting means creating new endpoint device entries). |
280 | * When either of these happens, defer the Set-Interface. | 280 | * When either of these happens, defer the Set-Interface. |
281 | */ | 281 | */ |
282 | if (!error && intf->dev.power.status == DPM_ON) | 282 | if (intf->cur_altsetting->desc.bAlternateSetting == 0) |
283 | ; /* Already in altsetting 0 so skip Set-Interface */ | ||
284 | else if (!error && intf->dev.power.status == DPM_ON) | ||
283 | usb_set_interface(udev, intf->altsetting[0]. | 285 | usb_set_interface(udev, intf->altsetting[0]. |
284 | desc.bInterfaceNumber, 0); | 286 | desc.bInterfaceNumber, 0); |
285 | else | 287 | else |
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 94632264dccf..185be760833e 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c | |||
@@ -277,8 +277,8 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de | |||
277 | 277 | ||
278 | if (inode) { | 278 | if (inode) { |
279 | inode->i_mode = mode; | 279 | inode->i_mode = mode; |
280 | inode->i_uid = current->fsuid; | 280 | inode->i_uid = current_fsuid(); |
281 | inode->i_gid = current->fsgid; | 281 | inode->i_gid = current_fsgid(); |
282 | inode->i_blocks = 0; | 282 | inode->i_blocks = 0; |
283 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 283 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
284 | switch (mode & S_IFMT) { | 284 | switch (mode & S_IFMT) { |
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 428b5993575a..3a8bb53fc473 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c | |||
@@ -651,6 +651,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) | |||
651 | fs_in_desc.bEndpointAddress; | 651 | fs_in_desc.bEndpointAddress; |
652 | hs_out_desc.bEndpointAddress = | 652 | hs_out_desc.bEndpointAddress = |
653 | fs_out_desc.bEndpointAddress; | 653 | fs_out_desc.bEndpointAddress; |
654 | hs_notify_desc.bEndpointAddress = | ||
655 | fs_notify_desc.bEndpointAddress; | ||
654 | 656 | ||
655 | /* copy descriptors, and track endpoint copies */ | 657 | /* copy descriptors, and track endpoint copies */ |
656 | f->hs_descriptors = usb_copy_descriptors(eth_hs_function); | 658 | f->hs_descriptors = usb_copy_descriptors(eth_hs_function); |
@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) | |||
662 | f->hs_descriptors, &hs_in_desc); | 664 | f->hs_descriptors, &hs_in_desc); |
663 | rndis->hs.out = usb_find_endpoint(eth_hs_function, | 665 | rndis->hs.out = usb_find_endpoint(eth_hs_function, |
664 | f->hs_descriptors, &hs_out_desc); | 666 | f->hs_descriptors, &hs_out_desc); |
667 | rndis->hs.notify = usb_find_endpoint(eth_hs_function, | ||
668 | f->hs_descriptors, &hs_notify_desc); | ||
665 | } | 669 | } |
666 | 670 | ||
667 | rndis->port.open = rndis_open; | 671 | rndis->port.open = rndis_open; |
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 77b44fb48f0a..3a8879ec2061 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c | |||
@@ -623,7 +623,6 @@ static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) | |||
623 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) | 623 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) |
624 | static void init_controller(struct m66592 *m66592) | 624 | static void init_controller(struct m66592 *m66592) |
625 | { | 625 | { |
626 | usbf_start_clock(); | ||
627 | m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ | 626 | m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ |
628 | m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); | 627 | m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); |
629 | m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); | 628 | m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); |
@@ -671,9 +670,7 @@ static void init_controller(struct m66592 *m66592) | |||
671 | 670 | ||
672 | static void disable_controller(struct m66592 *m66592) | 671 | static void disable_controller(struct m66592 *m66592) |
673 | { | 672 | { |
674 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) | 673 | #if !defined(CONFIG_SUPERH_BUILT_IN_M66592) |
675 | usbf_stop_clock(); | ||
676 | #else | ||
677 | m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); | 674 | m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); |
678 | udelay(1); | 675 | udelay(1); |
679 | m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); | 676 | m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); |
@@ -686,9 +683,7 @@ static void disable_controller(struct m66592 *m66592) | |||
686 | 683 | ||
687 | static void m66592_start_xclock(struct m66592 *m66592) | 684 | static void m66592_start_xclock(struct m66592 *m66592) |
688 | { | 685 | { |
689 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) | 686 | #if !defined(CONFIG_SUPERH_BUILT_IN_M66592) |
690 | usbf_start_clock(); | ||
691 | #else | ||
692 | u16 tmp; | 687 | u16 tmp; |
693 | 688 | ||
694 | tmp = m66592_read(m66592, M66592_SYSCFG); | 689 | tmp = m66592_read(m66592, M66592_SYSCFG); |
@@ -1539,7 +1534,10 @@ static int __exit m66592_remove(struct platform_device *pdev) | |||
1539 | iounmap(m66592->reg); | 1534 | iounmap(m66592->reg); |
1540 | free_irq(platform_get_irq(pdev, 0), m66592); | 1535 | free_irq(platform_get_irq(pdev, 0), m66592); |
1541 | m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); | 1536 | m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); |
1542 | usbf_stop_clock(); | 1537 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) |
1538 | clk_disable(m66592->clk); | ||
1539 | clk_put(m66592->clk); | ||
1540 | #endif | ||
1543 | kfree(m66592); | 1541 | kfree(m66592); |
1544 | return 0; | 1542 | return 0; |
1545 | } | 1543 | } |
@@ -1556,6 +1554,9 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1556 | int irq; | 1554 | int irq; |
1557 | void __iomem *reg = NULL; | 1555 | void __iomem *reg = NULL; |
1558 | struct m66592 *m66592 = NULL; | 1556 | struct m66592 *m66592 = NULL; |
1557 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) | ||
1558 | char clk_name[8]; | ||
1559 | #endif | ||
1559 | int ret = 0; | 1560 | int ret = 0; |
1560 | int i; | 1561 | int i; |
1561 | 1562 | ||
@@ -1614,6 +1615,16 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1614 | goto clean_up; | 1615 | goto clean_up; |
1615 | } | 1616 | } |
1616 | 1617 | ||
1618 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) | ||
1619 | snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id); | ||
1620 | m66592->clk = clk_get(&pdev->dev, clk_name); | ||
1621 | if (IS_ERR(m66592->clk)) { | ||
1622 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
1623 | ret = PTR_ERR(m66592->clk); | ||
1624 | goto clean_up2; | ||
1625 | } | ||
1626 | clk_enable(m66592->clk); | ||
1627 | #endif | ||
1617 | INIT_LIST_HEAD(&m66592->gadget.ep_list); | 1628 | INIT_LIST_HEAD(&m66592->gadget.ep_list); |
1618 | m66592->gadget.ep0 = &m66592->ep[0].ep; | 1629 | m66592->gadget.ep0 = &m66592->ep[0].ep; |
1619 | INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list); | 1630 | INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list); |
@@ -1645,7 +1656,7 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1645 | 1656 | ||
1646 | m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); | 1657 | m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); |
1647 | if (m66592->ep0_req == NULL) | 1658 | if (m66592->ep0_req == NULL) |
1648 | goto clean_up2; | 1659 | goto clean_up3; |
1649 | m66592->ep0_req->complete = nop_completion; | 1660 | m66592->ep0_req->complete = nop_completion; |
1650 | 1661 | ||
1651 | init_controller(m66592); | 1662 | init_controller(m66592); |
@@ -1653,7 +1664,12 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1653 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); | 1664 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); |
1654 | return 0; | 1665 | return 0; |
1655 | 1666 | ||
1667 | clean_up3: | ||
1668 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) | ||
1669 | clk_disable(m66592->clk); | ||
1670 | clk_put(m66592->clk); | ||
1656 | clean_up2: | 1671 | clean_up2: |
1672 | #endif | ||
1657 | free_irq(irq, m66592); | 1673 | free_irq(irq, m66592); |
1658 | clean_up: | 1674 | clean_up: |
1659 | if (m66592) { | 1675 | if (m66592) { |
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h index f118f00f1466..286ce07e7960 100644 --- a/drivers/usb/gadget/m66592-udc.h +++ b/drivers/usb/gadget/m66592-udc.h | |||
@@ -23,6 +23,10 @@ | |||
23 | #ifndef __M66592_UDC_H__ | 23 | #ifndef __M66592_UDC_H__ |
24 | #define __M66592_UDC_H__ | 24 | #define __M66592_UDC_H__ |
25 | 25 | ||
26 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) | ||
27 | #include <linux/clk.h> | ||
28 | #endif | ||
29 | |||
26 | #define M66592_SYSCFG 0x00 | 30 | #define M66592_SYSCFG 0x00 |
27 | #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ | 31 | #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ |
28 | #define M66592_XTAL48 0x8000 /* 48MHz */ | 32 | #define M66592_XTAL48 0x8000 /* 48MHz */ |
@@ -476,6 +480,9 @@ struct m66592_ep { | |||
476 | struct m66592 { | 480 | struct m66592 { |
477 | spinlock_t lock; | 481 | spinlock_t lock; |
478 | void __iomem *reg; | 482 | void __iomem *reg; |
483 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) | ||
484 | struct clk *clk; | ||
485 | #endif | ||
479 | 486 | ||
480 | struct usb_gadget gadget; | 487 | struct usb_gadget gadget; |
481 | struct usb_gadget_driver *driver; | 488 | struct usb_gadget_driver *driver; |
@@ -604,26 +611,6 @@ static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, | |||
604 | #define m66592_bset(m66592, val, offset) \ | 611 | #define m66592_bset(m66592, val, offset) \ |
605 | m66592_mdfy(m66592, val, 0, offset) | 612 | m66592_mdfy(m66592, val, 0, offset) |
606 | 613 | ||
607 | #if defined(CONFIG_SUPERH_BUILT_IN_M66592) | ||
608 | #include <asm/io.h> | ||
609 | #define MSTPCR2 0xA4150038 /* for SH7722 */ | ||
610 | #define MSTPCR2_USB 0x00000800 | ||
611 | |||
612 | static inline void usbf_start_clock(void) | ||
613 | { | ||
614 | ctrl_outl(ctrl_inl(MSTPCR2) & ~MSTPCR2_USB, MSTPCR2); | ||
615 | } | ||
616 | |||
617 | static inline void usbf_stop_clock(void) | ||
618 | { | ||
619 | ctrl_outl(ctrl_inl(MSTPCR2) | MSTPCR2_USB, MSTPCR2); | ||
620 | } | ||
621 | |||
622 | #else | ||
623 | #define usbf_start_clock(x) | ||
624 | #define usbf_stop_clock(x) | ||
625 | #endif /* if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ | ||
626 | |||
627 | #endif /* ifndef __M66592_UDC_H__ */ | 614 | #endif /* ifndef __M66592_UDC_H__ */ |
628 | 615 | ||
629 | 616 | ||
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 2376f24f3c83..c21f14e0666a 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -114,6 +114,9 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597) | |||
114 | int i = 0; | 114 | int i = 0; |
115 | 115 | ||
116 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | 116 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) |
117 | #if defined(CONFIG_HAVE_CLK) | ||
118 | clk_enable(r8a66597->clk); | ||
119 | #endif | ||
117 | do { | 120 | do { |
118 | r8a66597_write(r8a66597, SCKE, SYSCFG0); | 121 | r8a66597_write(r8a66597, SCKE, SYSCFG0); |
119 | tmp = r8a66597_read(r8a66597, SYSCFG0); | 122 | tmp = r8a66597_read(r8a66597, SYSCFG0); |
@@ -154,7 +157,11 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597) | |||
154 | { | 157 | { |
155 | r8a66597_bclr(r8a66597, SCKE, SYSCFG0); | 158 | r8a66597_bclr(r8a66597, SCKE, SYSCFG0); |
156 | udelay(1); | 159 | udelay(1); |
157 | #if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | 160 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) |
161 | #if defined(CONFIG_HAVE_CLK) | ||
162 | clk_disable(r8a66597->clk); | ||
163 | #endif | ||
164 | #else | ||
158 | r8a66597_bclr(r8a66597, PLLC, SYSCFG0); | 165 | r8a66597_bclr(r8a66597, PLLC, SYSCFG0); |
159 | r8a66597_bclr(r8a66597, XCKE, SYSCFG0); | 166 | r8a66597_bclr(r8a66597, XCKE, SYSCFG0); |
160 | r8a66597_bclr(r8a66597, USBE, SYSCFG0); | 167 | r8a66597_bclr(r8a66597, USBE, SYSCFG0); |
@@ -2261,6 +2268,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) | |||
2261 | del_timer_sync(&r8a66597->rh_timer); | 2268 | del_timer_sync(&r8a66597->rh_timer); |
2262 | usb_remove_hcd(hcd); | 2269 | usb_remove_hcd(hcd); |
2263 | iounmap((void *)r8a66597->reg); | 2270 | iounmap((void *)r8a66597->reg); |
2271 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | ||
2272 | clk_put(r8a66597->clk); | ||
2273 | #endif | ||
2264 | usb_put_hcd(hcd); | 2274 | usb_put_hcd(hcd); |
2265 | return 0; | 2275 | return 0; |
2266 | } | 2276 | } |
@@ -2268,6 +2278,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) | |||
2268 | #define resource_len(r) (((r)->end - (r)->start) + 1) | 2278 | #define resource_len(r) (((r)->end - (r)->start) + 1) |
2269 | static int __init r8a66597_probe(struct platform_device *pdev) | 2279 | static int __init r8a66597_probe(struct platform_device *pdev) |
2270 | { | 2280 | { |
2281 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | ||
2282 | char clk_name[8]; | ||
2283 | #endif | ||
2271 | struct resource *res = NULL, *ires; | 2284 | struct resource *res = NULL, *ires; |
2272 | int irq = -1; | 2285 | int irq = -1; |
2273 | void __iomem *reg = NULL; | 2286 | void __iomem *reg = NULL; |
@@ -2320,6 +2333,16 @@ static int __init r8a66597_probe(struct platform_device *pdev) | |||
2320 | memset(r8a66597, 0, sizeof(struct r8a66597)); | 2333 | memset(r8a66597, 0, sizeof(struct r8a66597)); |
2321 | dev_set_drvdata(&pdev->dev, r8a66597); | 2334 | dev_set_drvdata(&pdev->dev, r8a66597); |
2322 | 2335 | ||
2336 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | ||
2337 | snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); | ||
2338 | r8a66597->clk = clk_get(&pdev->dev, clk_name); | ||
2339 | if (IS_ERR(r8a66597->clk)) { | ||
2340 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
2341 | ret = PTR_ERR(r8a66597->clk); | ||
2342 | goto clean_up2; | ||
2343 | } | ||
2344 | #endif | ||
2345 | |||
2323 | spin_lock_init(&r8a66597->lock); | 2346 | spin_lock_init(&r8a66597->lock); |
2324 | init_timer(&r8a66597->rh_timer); | 2347 | init_timer(&r8a66597->rh_timer); |
2325 | r8a66597->rh_timer.function = r8a66597_timer; | 2348 | r8a66597->rh_timer.function = r8a66597_timer; |
@@ -2365,11 +2388,18 @@ static int __init r8a66597_probe(struct platform_device *pdev) | |||
2365 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger); | 2388 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger); |
2366 | if (ret != 0) { | 2389 | if (ret != 0) { |
2367 | dev_err(&pdev->dev, "Failed to add hcd\n"); | 2390 | dev_err(&pdev->dev, "Failed to add hcd\n"); |
2368 | goto clean_up; | 2391 | goto clean_up3; |
2369 | } | 2392 | } |
2370 | 2393 | ||
2371 | return 0; | 2394 | return 0; |
2372 | 2395 | ||
2396 | clean_up3: | ||
2397 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | ||
2398 | clk_put(r8a66597->clk); | ||
2399 | clean_up2: | ||
2400 | #endif | ||
2401 | usb_put_hcd(hcd); | ||
2402 | |||
2373 | clean_up: | 2403 | clean_up: |
2374 | if (reg) | 2404 | if (reg) |
2375 | iounmap(reg); | 2405 | iounmap(reg); |
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index 84ee01417315..ecacde4d69b0 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h | |||
@@ -26,6 +26,10 @@ | |||
26 | #ifndef __R8A66597_H__ | 26 | #ifndef __R8A66597_H__ |
27 | #define __R8A66597_H__ | 27 | #define __R8A66597_H__ |
28 | 28 | ||
29 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | ||
30 | #include <linux/clk.h> | ||
31 | #endif | ||
32 | |||
29 | #define SYSCFG0 0x00 | 33 | #define SYSCFG0 0x00 |
30 | #define SYSCFG1 0x02 | 34 | #define SYSCFG1 0x02 |
31 | #define SYSSTS0 0x04 | 35 | #define SYSSTS0 0x04 |
@@ -481,7 +485,9 @@ struct r8a66597_root_hub { | |||
481 | struct r8a66597 { | 485 | struct r8a66597 { |
482 | spinlock_t lock; | 486 | spinlock_t lock; |
483 | unsigned long reg; | 487 | unsigned long reg; |
484 | 488 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | |
489 | struct clk *clk; | ||
490 | #endif | ||
485 | struct r8a66597_device device0; | 491 | struct r8a66597_device device0; |
486 | struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB]; | 492 | struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB]; |
487 | struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE]; | 493 | struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE]; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index aad1359a3eb1..fb6f2933b01b 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -143,6 +143,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { | |||
143 | static struct usb_device_id id_table_combined [] = { | 143 | static struct usb_device_id id_table_combined [] = { |
144 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, | 144 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, |
145 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, | 145 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
146 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, | ||
146 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, | 147 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, |
147 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, | 148 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, |
148 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, | 149 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, |
@@ -166,6 +167,7 @@ static struct usb_device_id id_table_combined [] = { | |||
166 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, | 167 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, |
167 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, | 168 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
168 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, | 169 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
170 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, | ||
169 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, | 171 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, |
170 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, | 172 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, |
171 | { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, | 173 | { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 07a3992abad2..373ee09975bb 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -40,6 +40,9 @@ | |||
40 | /* AlphaMicro Components AMC-232USB01 device */ | 40 | /* AlphaMicro Components AMC-232USB01 device */ |
41 | #define FTDI_AMC232_PID 0xFF00 /* Product Id */ | 41 | #define FTDI_AMC232_PID 0xFF00 /* Product Id */ |
42 | 42 | ||
43 | /* www.candapter.com Ewert Energy Systems CANdapter device */ | ||
44 | #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ | ||
45 | |||
43 | /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ | 46 | /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ |
44 | /* the VID is the standard ftdi vid (FTDI_VID) */ | 47 | /* the VID is the standard ftdi vid (FTDI_VID) */ |
45 | #define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ | 48 | #define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ |
@@ -75,6 +78,9 @@ | |||
75 | /* OpenDCC (www.opendcc.de) product id */ | 78 | /* OpenDCC (www.opendcc.de) product id */ |
76 | #define FTDI_OPENDCC_PID 0xBFD8 | 79 | #define FTDI_OPENDCC_PID 0xBFD8 |
77 | 80 | ||
81 | /* Sprog II (Andrew Crosland's SprogII DCC interface) */ | ||
82 | #define FTDI_SPROG_II 0xF0C8 | ||
83 | |||
78 | /* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */ | 84 | /* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */ |
79 | /* they use the ftdi chipset for the USB interface and the vendor id is the same */ | 85 | /* they use the ftdi chipset for the USB interface and the vendor id is the same */ |
80 | #define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */ | 86 | #define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */ |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 491c8857b644..1aed584be5eb 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -91,6 +91,8 @@ static struct usb_device_id id_table [] = { | |||
91 | { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, | 91 | { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, |
92 | { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, | 92 | { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, |
93 | { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, | 93 | { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, |
94 | { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, | ||
95 | { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, | ||
94 | { } /* Terminating entry */ | 96 | { } /* Terminating entry */ |
95 | }; | 97 | }; |
96 | 98 | ||
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a3bd039c78e9..54974f446a8c 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -110,3 +110,11 @@ | |||
110 | /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ | 110 | /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ |
111 | #define YCCABLE_VENDOR_ID 0x05ad | 111 | #define YCCABLE_VENDOR_ID 0x05ad |
112 | #define YCCABLE_PRODUCT_ID 0x0fba | 112 | #define YCCABLE_PRODUCT_ID 0x0fba |
113 | |||
114 | /* "Superial" USB - Serial */ | ||
115 | #define SUPERIAL_VENDOR_ID 0x5372 | ||
116 | #define SUPERIAL_PRODUCT_ID 0x2303 | ||
117 | |||
118 | /* Hewlett-Packard LD220-HP POS Pole Display */ | ||
119 | #define HP_VENDOR_ID 0x03f0 | ||
120 | #define HP_LD220_PRODUCT_ID 0x3524 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 31c42d1cae13..01d0c70d60e9 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -16,56 +16,6 @@ | |||
16 | * For questions or problems with this driver, contact Texas Instruments | 16 | * For questions or problems with this driver, contact Texas Instruments |
17 | * technical support, or Al Borchers <alborchers@steinerpoint.com>, or | 17 | * technical support, or Al Borchers <alborchers@steinerpoint.com>, or |
18 | * Peter Berger <pberger@brimson.com>. | 18 | * Peter Berger <pberger@brimson.com>. |
19 | * | ||
20 | * This driver needs this hotplug script in /etc/hotplug/usb/ti_usb_3410_5052 | ||
21 | * or in /etc/hotplug.d/usb/ti_usb_3410_5052.hotplug to set the device | ||
22 | * configuration. | ||
23 | * | ||
24 | * #!/bin/bash | ||
25 | * | ||
26 | * BOOT_CONFIG=1 | ||
27 | * ACTIVE_CONFIG=2 | ||
28 | * | ||
29 | * if [[ "$ACTION" != "add" ]] | ||
30 | * then | ||
31 | * exit | ||
32 | * fi | ||
33 | * | ||
34 | * CONFIG_PATH=/sys${DEVPATH%/?*}/bConfigurationValue | ||
35 | * | ||
36 | * if [[ 0`cat $CONFIG_PATH` -ne $BOOT_CONFIG ]] | ||
37 | * then | ||
38 | * exit | ||
39 | * fi | ||
40 | * | ||
41 | * PRODUCT=${PRODUCT%/?*} # delete version | ||
42 | * VENDOR_ID=`printf "%d" 0x${PRODUCT%/?*}` | ||
43 | * PRODUCT_ID=`printf "%d" 0x${PRODUCT#*?/}` | ||
44 | * | ||
45 | * PARAM_PATH=/sys/module/ti_usb_3410_5052/parameters | ||
46 | * | ||
47 | * function scan() { | ||
48 | * s=$1 | ||
49 | * shift | ||
50 | * for i | ||
51 | * do | ||
52 | * if [[ $s -eq $i ]] | ||
53 | * then | ||
54 | * return 0 | ||
55 | * fi | ||
56 | * done | ||
57 | * return 1 | ||
58 | * } | ||
59 | * | ||
60 | * IFS=$IFS, | ||
61 | * | ||
62 | * if (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_3410` && | ||
63 | * scan $PRODUCT_ID 13328 `cat $PARAM_PATH/product_3410`) || | ||
64 | * (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_5052` && | ||
65 | * scan $PRODUCT_ID 20562 20818 20570 20575 `cat $PARAM_PATH/product_5052`) | ||
66 | * then | ||
67 | * echo $ACTIVE_CONFIG > $CONFIG_PATH | ||
68 | * fi | ||
69 | */ | 19 | */ |
70 | 20 | ||
71 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -457,9 +407,10 @@ static int ti_startup(struct usb_serial *serial) | |||
457 | goto free_tdev; | 407 | goto free_tdev; |
458 | } | 408 | } |
459 | 409 | ||
460 | /* the second configuration must be set (in sysfs by hotplug script) */ | 410 | /* the second configuration must be set */ |
461 | if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) { | 411 | if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) { |
462 | status = -ENODEV; | 412 | status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG); |
413 | status = status ? status : -ENODEV; | ||
463 | goto free_tdev; | 414 | goto free_tdev; |
464 | } | 415 | } |
465 | 416 | ||
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index e61f2bfc64ad..bfcc1fe82518 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -167,8 +167,22 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600, | |||
167 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 167 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
168 | US_FL_FIX_CAPACITY ), | 168 | US_FL_FIX_CAPACITY ), |
169 | 169 | ||
170 | /* Reported by Ozan Sener <themgzzy@gmail.com> */ | ||
171 | UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551, | ||
172 | "Nokia", | ||
173 | "3500c", | ||
174 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
175 | US_FL_FIX_CAPACITY ), | ||
176 | |||
177 | /* Reported by CSECSY Laszlo <boobaa@frugalware.org> */ | ||
178 | UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601, | ||
179 | "Nokia", | ||
180 | "Nokia 3109c", | ||
181 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
182 | US_FL_FIX_CAPACITY ), | ||
183 | |||
170 | /* Patch for Nokia 5310 capacity */ | 184 | /* Patch for Nokia 5310 capacity */ |
171 | UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591, | 185 | UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701, |
172 | "Nokia", | 186 | "Nokia", |
173 | "5310", | 187 | "5310", |
174 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 188 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 3f3ce13fef43..d0c821992a99 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -1889,10 +1889,11 @@ config FB_W100 | |||
1889 | config FB_SH_MOBILE_LCDC | 1889 | config FB_SH_MOBILE_LCDC |
1890 | tristate "SuperH Mobile LCDC framebuffer support" | 1890 | tristate "SuperH Mobile LCDC framebuffer support" |
1891 | depends on FB && SUPERH | 1891 | depends on FB && SUPERH |
1892 | select FB_CFB_FILLRECT | 1892 | select FB_SYS_FILLRECT |
1893 | select FB_CFB_COPYAREA | 1893 | select FB_SYS_COPYAREA |
1894 | select FB_CFB_IMAGEBLIT | 1894 | select FB_SYS_IMAGEBLIT |
1895 | default m | 1895 | select FB_SYS_FOPS |
1896 | select FB_DEFERRED_IO | ||
1896 | ---help--- | 1897 | ---help--- |
1897 | Frame buffer driver for the on-chip SH-Mobile LCD controller. | 1898 | Frame buffer driver for the on-chip SH-Mobile LCD controller. |
1898 | 1899 | ||
@@ -2021,17 +2022,19 @@ config FB_COBALT | |||
2021 | depends on FB && MIPS_COBALT | 2022 | depends on FB && MIPS_COBALT |
2022 | 2023 | ||
2023 | config FB_SH7760 | 2024 | config FB_SH7760 |
2024 | bool "SH7760/SH7763 LCDC support" | 2025 | bool "SH7760/SH7763/SH7720/SH7721 LCDC support" |
2025 | depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763) | 2026 | depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \ |
2026 | select FB_CFB_FILLRECT | 2027 | || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721) |
2027 | select FB_CFB_COPYAREA | 2028 | select FB_CFB_FILLRECT |
2028 | select FB_CFB_IMAGEBLIT | 2029 | select FB_CFB_COPYAREA |
2029 | help | 2030 | select FB_CFB_IMAGEBLIT |
2030 | Support for the SH7760/SH7763 integrated (D)STN/TFT LCD Controller. | 2031 | ---help--- |
2031 | Supports display resolutions up to 1024x1024 pixel, grayscale and | 2032 | Support for the SH7760/SH7763/SH7720/SH7721 integrated |
2032 | color operation, with depths ranging from 1 bpp to 8 bpp monochrome | 2033 | (D)STN/TFT LCD Controller. |
2033 | and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for | 2034 | Supports display resolutions up to 1024x1024 pixel, grayscale and |
2034 | panels <= 320 pixel horizontal resolution. | 2035 | color operation, with depths ranging from 1 bpp to 8 bpp monochrome |
2036 | and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for | ||
2037 | panels <= 320 pixel horizontal resolution. | ||
2035 | 2038 | ||
2036 | config FB_VIRTUAL | 2039 | config FB_VIRTUAL |
2037 | tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" | 2040 | tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" |
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 4835bdc4e9f1..082026546aee 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c | |||
@@ -24,6 +24,19 @@ | |||
24 | #include <linux/rmap.h> | 24 | #include <linux/rmap.h> |
25 | #include <linux/pagemap.h> | 25 | #include <linux/pagemap.h> |
26 | 26 | ||
27 | struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) | ||
28 | { | ||
29 | void *screen_base = (void __force *) info->screen_base; | ||
30 | struct page *page; | ||
31 | |||
32 | if (is_vmalloc_addr(screen_base + offs)) | ||
33 | page = vmalloc_to_page(screen_base + offs); | ||
34 | else | ||
35 | page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); | ||
36 | |||
37 | return page; | ||
38 | } | ||
39 | |||
27 | /* this is to find and return the vmalloc-ed fb pages */ | 40 | /* this is to find and return the vmalloc-ed fb pages */ |
28 | static int fb_deferred_io_fault(struct vm_area_struct *vma, | 41 | static int fb_deferred_io_fault(struct vm_area_struct *vma, |
29 | struct vm_fault *vmf) | 42 | struct vm_fault *vmf) |
@@ -31,14 +44,12 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, | |||
31 | unsigned long offset; | 44 | unsigned long offset; |
32 | struct page *page; | 45 | struct page *page; |
33 | struct fb_info *info = vma->vm_private_data; | 46 | struct fb_info *info = vma->vm_private_data; |
34 | /* info->screen_base is virtual memory */ | ||
35 | void *screen_base = (void __force *) info->screen_base; | ||
36 | 47 | ||
37 | offset = vmf->pgoff << PAGE_SHIFT; | 48 | offset = vmf->pgoff << PAGE_SHIFT; |
38 | if (offset >= info->fix.smem_len) | 49 | if (offset >= info->fix.smem_len) |
39 | return VM_FAULT_SIGBUS; | 50 | return VM_FAULT_SIGBUS; |
40 | 51 | ||
41 | page = vmalloc_to_page(screen_base + offset); | 52 | page = fb_deferred_io_page(info, offset); |
42 | if (!page) | 53 | if (!page) |
43 | return VM_FAULT_SIGBUS; | 54 | return VM_FAULT_SIGBUS; |
44 | 55 | ||
@@ -60,6 +71,10 @@ int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
60 | { | 71 | { |
61 | struct fb_info *info = file->private_data; | 72 | struct fb_info *info = file->private_data; |
62 | 73 | ||
74 | /* Skip if deferred io is complied-in but disabled on this fbdev */ | ||
75 | if (!info->fbdefio) | ||
76 | return 0; | ||
77 | |||
63 | /* Kill off the delayed work */ | 78 | /* Kill off the delayed work */ |
64 | cancel_rearming_delayed_work(&info->deferred_work); | 79 | cancel_rearming_delayed_work(&info->deferred_work); |
65 | 80 | ||
@@ -184,7 +199,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open); | |||
184 | 199 | ||
185 | void fb_deferred_io_cleanup(struct fb_info *info) | 200 | void fb_deferred_io_cleanup(struct fb_info *info) |
186 | { | 201 | { |
187 | void *screen_base = (void __force *) info->screen_base; | ||
188 | struct fb_deferred_io *fbdefio = info->fbdefio; | 202 | struct fb_deferred_io *fbdefio = info->fbdefio; |
189 | struct page *page; | 203 | struct page *page; |
190 | int i; | 204 | int i; |
@@ -195,9 +209,12 @@ void fb_deferred_io_cleanup(struct fb_info *info) | |||
195 | 209 | ||
196 | /* clear out the mapping that we setup */ | 210 | /* clear out the mapping that we setup */ |
197 | for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { | 211 | for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { |
198 | page = vmalloc_to_page(screen_base + i); | 212 | page = fb_deferred_io_page(info, i); |
199 | page->mapping = NULL; | 213 | page->mapping = NULL; |
200 | } | 214 | } |
215 | |||
216 | info->fbops->fb_mmap = NULL; | ||
217 | mutex_destroy(&fbdefio->lock); | ||
201 | } | 218 | } |
202 | EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); | 219 | EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); |
203 | 220 | ||
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c index 8d0212da4514..653bdfee3057 100644 --- a/drivers/video/sh7760fb.c +++ b/drivers/video/sh7760fb.c | |||
@@ -13,6 +13,8 @@ | |||
13 | * | 13 | * |
14 | * Thanks to Siegfried Schaefer <s.schaefer at schaefer-edv.de> | 14 | * Thanks to Siegfried Schaefer <s.schaefer at schaefer-edv.de> |
15 | * for his original source and testing! | 15 | * for his original source and testing! |
16 | * | ||
17 | * sh7760_setcolreg get from drivers/video/sh_mobile_lcdcfb.c | ||
16 | */ | 18 | */ |
17 | 19 | ||
18 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
@@ -53,29 +55,6 @@ static irqreturn_t sh7760fb_irq(int irq, void *data) | |||
53 | return IRQ_HANDLED; | 55 | return IRQ_HANDLED; |
54 | } | 56 | } |
55 | 57 | ||
56 | static void sh7760fb_wait_vsync(struct fb_info *info) | ||
57 | { | ||
58 | struct sh7760fb_par *par = info->par; | ||
59 | |||
60 | if (par->pd->novsync) | ||
61 | return; | ||
62 | |||
63 | iowrite16(ioread16(par->base + LDINTR) & ~VINT_CHECK, | ||
64 | par->base + LDINTR); | ||
65 | |||
66 | if (par->irq < 0) { | ||
67 | /* poll for vert. retrace: status bit is sticky */ | ||
68 | while (!(ioread16(par->base + LDINTR) & VINT_CHECK)) | ||
69 | cpu_relax(); | ||
70 | } else { | ||
71 | /* a "wait_for_irq_event(par->irq)" would be extremely nice */ | ||
72 | init_completion(&par->vsync); | ||
73 | enable_irq(par->irq); | ||
74 | wait_for_completion(&par->vsync); | ||
75 | disable_irq_nosync(par->irq); | ||
76 | } | ||
77 | } | ||
78 | |||
79 | /* wait_for_lps - wait until power supply has reached a certain state. */ | 58 | /* wait_for_lps - wait until power supply has reached a certain state. */ |
80 | static int wait_for_lps(struct sh7760fb_par *par, int val) | 59 | static int wait_for_lps(struct sh7760fb_par *par, int val) |
81 | { | 60 | { |
@@ -117,55 +96,28 @@ static int sh7760fb_blank(int blank, struct fb_info *info) | |||
117 | return wait_for_lps(par, lps); | 96 | return wait_for_lps(par, lps); |
118 | } | 97 | } |
119 | 98 | ||
120 | /* set color registers */ | 99 | static int sh7760_setcolreg (u_int regno, |
121 | static int sh7760fb_setcmap(struct fb_cmap *cmap, struct fb_info *info) | 100 | u_int red, u_int green, u_int blue, |
101 | u_int transp, struct fb_info *info) | ||
122 | { | 102 | { |
123 | struct sh7760fb_par *par = info->par; | 103 | u32 *palette = info->pseudo_palette; |
124 | u32 s = cmap->start; | ||
125 | u32 l = cmap->len; | ||
126 | u16 *r = cmap->red; | ||
127 | u16 *g = cmap->green; | ||
128 | u16 *b = cmap->blue; | ||
129 | u32 col, tmo; | ||
130 | int ret; | ||
131 | 104 | ||
132 | ret = 0; | 105 | if (regno >= 16) |
106 | return -EINVAL; | ||
133 | 107 | ||
134 | sh7760fb_wait_vsync(info); | 108 | /* only FB_VISUAL_TRUECOLOR supported */ |
135 | 109 | ||
136 | /* request palette access */ | 110 | red >>= 16 - info->var.red.length; |
137 | iowrite16(LDPALCR_PALEN, par->base + LDPALCR); | 111 | green >>= 16 - info->var.green.length; |
112 | blue >>= 16 - info->var.blue.length; | ||
113 | transp >>= 16 - info->var.transp.length; | ||
138 | 114 | ||
139 | /* poll for access grant */ | 115 | palette[regno] = (red << info->var.red.offset) | |
140 | tmo = 100; | 116 | (green << info->var.green.offset) | |
141 | while (!(ioread16(par->base + LDPALCR) & LDPALCR_PALS) && (--tmo)) | 117 | (blue << info->var.blue.offset) | |
142 | cpu_relax(); | 118 | (transp << info->var.transp.offset); |
143 | 119 | ||
144 | if (!tmo) { | 120 | return 0; |
145 | ret = 1; | ||
146 | dev_dbg(info->dev, "no palette access!\n"); | ||
147 | goto out; | ||
148 | } | ||
149 | |||
150 | while (l && (s < 256)) { | ||
151 | col = ((*r) & 0xff) << 16; | ||
152 | col |= ((*g) & 0xff) << 8; | ||
153 | col |= ((*b) & 0xff); | ||
154 | col &= SH7760FB_PALETTE_MASK; | ||
155 | iowrite32(col, par->base + LDPR(s)); | ||
156 | |||
157 | if (s < 16) | ||
158 | ((u32 *) (info->pseudo_palette))[s] = s; | ||
159 | |||
160 | s++; | ||
161 | l--; | ||
162 | r++; | ||
163 | g++; | ||
164 | b++; | ||
165 | } | ||
166 | out: | ||
167 | iowrite16(0, par->base + LDPALCR); | ||
168 | return ret; | ||
169 | } | 121 | } |
170 | 122 | ||
171 | static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info, | 123 | static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info, |
@@ -406,7 +358,7 @@ static struct fb_ops sh7760fb_ops = { | |||
406 | .owner = THIS_MODULE, | 358 | .owner = THIS_MODULE, |
407 | .fb_blank = sh7760fb_blank, | 359 | .fb_blank = sh7760fb_blank, |
408 | .fb_check_var = sh7760fb_check_var, | 360 | .fb_check_var = sh7760fb_check_var, |
409 | .fb_setcmap = sh7760fb_setcmap, | 361 | .fb_setcolreg = sh7760_setcolreg, |
410 | .fb_set_par = sh7760fb_set_par, | 362 | .fb_set_par = sh7760fb_set_par, |
411 | .fb_fillrect = cfb_fillrect, | 363 | .fb_fillrect = cfb_fillrect, |
412 | .fb_copyarea = cfb_copyarea, | 364 | .fb_copyarea = cfb_copyarea, |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index efff672fd7b8..0e2b8fd24df1 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -16,7 +16,9 @@ | |||
16 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/dma-mapping.h> | 18 | #include <linux/dma-mapping.h> |
19 | #include <linux/interrupt.h> | ||
19 | #include <video/sh_mobile_lcdc.h> | 20 | #include <video/sh_mobile_lcdc.h> |
21 | #include <asm/atomic.h> | ||
20 | 22 | ||
21 | #define PALETTE_NR 16 | 23 | #define PALETTE_NR 16 |
22 | 24 | ||
@@ -30,11 +32,15 @@ struct sh_mobile_lcdc_chan { | |||
30 | u32 pseudo_palette[PALETTE_NR]; | 32 | u32 pseudo_palette[PALETTE_NR]; |
31 | struct fb_info info; | 33 | struct fb_info info; |
32 | dma_addr_t dma_handle; | 34 | dma_addr_t dma_handle; |
35 | struct fb_deferred_io defio; | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | struct sh_mobile_lcdc_priv { | 38 | struct sh_mobile_lcdc_priv { |
36 | void __iomem *base; | 39 | void __iomem *base; |
40 | int irq; | ||
37 | #ifdef CONFIG_HAVE_CLK | 41 | #ifdef CONFIG_HAVE_CLK |
42 | atomic_t clk_usecnt; | ||
43 | struct clk *dot_clk; | ||
38 | struct clk *clk; | 44 | struct clk *clk; |
39 | #endif | 45 | #endif |
40 | unsigned long lddckr; | 46 | unsigned long lddckr; |
@@ -56,7 +62,7 @@ struct sh_mobile_lcdc_priv { | |||
56 | 62 | ||
57 | /* per-channel registers */ | 63 | /* per-channel registers */ |
58 | enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R, | 64 | enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R, |
59 | LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR }; | 65 | LDSM2R, LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR }; |
60 | 66 | ||
61 | static unsigned long lcdc_offs_mainlcd[] = { | 67 | static unsigned long lcdc_offs_mainlcd[] = { |
62 | [LDDCKPAT1R] = 0x400, | 68 | [LDDCKPAT1R] = 0x400, |
@@ -66,6 +72,7 @@ static unsigned long lcdc_offs_mainlcd[] = { | |||
66 | [LDMT3R] = 0x420, | 72 | [LDMT3R] = 0x420, |
67 | [LDDFR] = 0x424, | 73 | [LDDFR] = 0x424, |
68 | [LDSM1R] = 0x428, | 74 | [LDSM1R] = 0x428, |
75 | [LDSM2R] = 0x42c, | ||
69 | [LDSA1R] = 0x430, | 76 | [LDSA1R] = 0x430, |
70 | [LDMLSR] = 0x438, | 77 | [LDMLSR] = 0x438, |
71 | [LDHCNR] = 0x448, | 78 | [LDHCNR] = 0x448, |
@@ -83,6 +90,7 @@ static unsigned long lcdc_offs_sublcd[] = { | |||
83 | [LDMT3R] = 0x608, | 90 | [LDMT3R] = 0x608, |
84 | [LDDFR] = 0x60c, | 91 | [LDDFR] = 0x60c, |
85 | [LDSM1R] = 0x610, | 92 | [LDSM1R] = 0x610, |
93 | [LDSM2R] = 0x614, | ||
86 | [LDSA1R] = 0x618, | 94 | [LDSA1R] = 0x618, |
87 | [LDMLSR] = 0x620, | 95 | [LDMLSR] = 0x620, |
88 | [LDHCNR] = 0x624, | 96 | [LDHCNR] = 0x624, |
@@ -96,6 +104,8 @@ static unsigned long lcdc_offs_sublcd[] = { | |||
96 | #define LCDC_RESET 0x00000100 | 104 | #define LCDC_RESET 0x00000100 |
97 | #define DISPLAY_BEU 0x00000008 | 105 | #define DISPLAY_BEU 0x00000008 |
98 | #define LCDC_ENABLE 0x00000001 | 106 | #define LCDC_ENABLE 0x00000001 |
107 | #define LDINTR_FE 0x00000400 | ||
108 | #define LDINTR_FS 0x00000004 | ||
99 | 109 | ||
100 | static void lcdc_write_chan(struct sh_mobile_lcdc_chan *chan, | 110 | static void lcdc_write_chan(struct sh_mobile_lcdc_chan *chan, |
101 | int reg_nr, unsigned long data) | 111 | int reg_nr, unsigned long data) |
@@ -170,6 +180,65 @@ struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = { | |||
170 | lcdc_sys_read_data, | 180 | lcdc_sys_read_data, |
171 | }; | 181 | }; |
172 | 182 | ||
183 | #ifdef CONFIG_HAVE_CLK | ||
184 | static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) | ||
185 | { | ||
186 | if (atomic_inc_and_test(&priv->clk_usecnt)) { | ||
187 | clk_enable(priv->clk); | ||
188 | if (priv->dot_clk) | ||
189 | clk_enable(priv->dot_clk); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) | ||
194 | { | ||
195 | if (atomic_sub_return(1, &priv->clk_usecnt) == -1) { | ||
196 | if (priv->dot_clk) | ||
197 | clk_disable(priv->dot_clk); | ||
198 | clk_disable(priv->clk); | ||
199 | } | ||
200 | } | ||
201 | #else | ||
202 | static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {} | ||
203 | static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} | ||
204 | #endif | ||
205 | |||
206 | static void sh_mobile_lcdc_deferred_io(struct fb_info *info, | ||
207 | struct list_head *pagelist) | ||
208 | { | ||
209 | struct sh_mobile_lcdc_chan *ch = info->par; | ||
210 | |||
211 | /* enable clocks before accessing hardware */ | ||
212 | sh_mobile_lcdc_clk_on(ch->lcdc); | ||
213 | |||
214 | /* trigger panel update */ | ||
215 | lcdc_write_chan(ch, LDSM2R, 1); | ||
216 | } | ||
217 | |||
218 | static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) | ||
219 | { | ||
220 | struct fb_deferred_io *fbdefio = info->fbdefio; | ||
221 | |||
222 | if (fbdefio) | ||
223 | schedule_delayed_work(&info->deferred_work, fbdefio->delay); | ||
224 | } | ||
225 | |||
226 | static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) | ||
227 | { | ||
228 | struct sh_mobile_lcdc_priv *priv = data; | ||
229 | unsigned long tmp; | ||
230 | |||
231 | /* acknowledge interrupt */ | ||
232 | tmp = lcdc_read(priv, _LDINTR); | ||
233 | tmp &= 0xffffff00; /* mask in high 24 bits */ | ||
234 | tmp |= 0x000000ff ^ LDINTR_FS; /* status in low 8 */ | ||
235 | lcdc_write(priv, _LDINTR, tmp); | ||
236 | |||
237 | /* disable clocks */ | ||
238 | sh_mobile_lcdc_clk_off(priv); | ||
239 | return IRQ_HANDLED; | ||
240 | } | ||
241 | |||
173 | static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv, | 242 | static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv, |
174 | int start) | 243 | int start) |
175 | { | 244 | { |
@@ -207,6 +276,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
207 | int k, m; | 276 | int k, m; |
208 | int ret = 0; | 277 | int ret = 0; |
209 | 278 | ||
279 | /* enable clocks before accessing the hardware */ | ||
280 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) | ||
281 | if (priv->ch[k].enabled) | ||
282 | sh_mobile_lcdc_clk_on(priv); | ||
283 | |||
210 | /* reset */ | 284 | /* reset */ |
211 | lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET); | 285 | lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET); |
212 | lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0); | 286 | lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0); |
@@ -249,7 +323,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
249 | lcdc_write(priv, _LDDCKSTPR, 0); | 323 | lcdc_write(priv, _LDDCKSTPR, 0); |
250 | lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0); | 324 | lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0); |
251 | 325 | ||
252 | /* interrupts are disabled */ | 326 | /* interrupts are disabled to begin with */ |
253 | lcdc_write(priv, _LDINTR, 0); | 327 | lcdc_write(priv, _LDINTR, 0); |
254 | 328 | ||
255 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { | 329 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { |
@@ -310,9 +384,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
310 | return ret; | 384 | return ret; |
311 | } | 385 | } |
312 | 386 | ||
313 | /* --- display_lcdc_data() --- */ | ||
314 | lcdc_write(priv, _LDINTR, 0x00000f00); | ||
315 | |||
316 | /* word and long word swap */ | 387 | /* word and long word swap */ |
317 | lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6); | 388 | lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6); |
318 | 389 | ||
@@ -334,8 +405,24 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
334 | /* set line size */ | 405 | /* set line size */ |
335 | lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length); | 406 | lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length); |
336 | 407 | ||
337 | /* continuous read mode */ | 408 | /* setup deferred io if SYS bus */ |
338 | lcdc_write_chan(ch, LDSM1R, 0); | 409 | tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; |
410 | if (ch->ldmt1r_value & (1 << 12) && tmp) { | ||
411 | ch->defio.deferred_io = sh_mobile_lcdc_deferred_io; | ||
412 | ch->defio.delay = msecs_to_jiffies(tmp); | ||
413 | ch->info.fbdefio = &ch->defio; | ||
414 | fb_deferred_io_init(&ch->info); | ||
415 | |||
416 | /* one-shot mode */ | ||
417 | lcdc_write_chan(ch, LDSM1R, 1); | ||
418 | |||
419 | /* enable "Frame End Interrupt Enable" bit */ | ||
420 | lcdc_write(priv, _LDINTR, LDINTR_FE); | ||
421 | |||
422 | } else { | ||
423 | /* continuous read mode */ | ||
424 | lcdc_write_chan(ch, LDSM1R, 0); | ||
425 | } | ||
339 | } | 426 | } |
340 | 427 | ||
341 | /* display output */ | 428 | /* display output */ |
@@ -359,6 +446,7 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) | |||
359 | { | 446 | { |
360 | struct sh_mobile_lcdc_chan *ch; | 447 | struct sh_mobile_lcdc_chan *ch; |
361 | struct sh_mobile_lcdc_board_cfg *board_cfg; | 448 | struct sh_mobile_lcdc_board_cfg *board_cfg; |
449 | unsigned long tmp; | ||
362 | int k; | 450 | int k; |
363 | 451 | ||
364 | /* tell the board code to disable the panel */ | 452 | /* tell the board code to disable the panel */ |
@@ -367,10 +455,22 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) | |||
367 | board_cfg = &ch->cfg.board_cfg; | 455 | board_cfg = &ch->cfg.board_cfg; |
368 | if (board_cfg->display_off) | 456 | if (board_cfg->display_off) |
369 | board_cfg->display_off(board_cfg->board_data); | 457 | board_cfg->display_off(board_cfg->board_data); |
458 | |||
459 | /* cleanup deferred io if SYS bus */ | ||
460 | tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; | ||
461 | if (ch->ldmt1r_value & (1 << 12) && tmp) { | ||
462 | fb_deferred_io_cleanup(&ch->info); | ||
463 | ch->info.fbdefio = NULL; | ||
464 | } | ||
370 | } | 465 | } |
371 | 466 | ||
372 | /* stop the lcdc */ | 467 | /* stop the lcdc */ |
373 | sh_mobile_lcdc_start_stop(priv, 0); | 468 | sh_mobile_lcdc_start_stop(priv, 0); |
469 | |||
470 | /* stop clocks */ | ||
471 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) | ||
472 | if (priv->ch[k].enabled) | ||
473 | sh_mobile_lcdc_clk_off(priv); | ||
374 | } | 474 | } |
375 | 475 | ||
376 | static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch) | 476 | static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch) |
@@ -413,9 +513,13 @@ static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch) | |||
413 | return -EINVAL; | 513 | return -EINVAL; |
414 | } | 514 | } |
415 | 515 | ||
416 | static int sh_mobile_lcdc_setup_clocks(struct device *dev, int clock_source, | 516 | static int sh_mobile_lcdc_setup_clocks(struct platform_device *pdev, |
517 | int clock_source, | ||
417 | struct sh_mobile_lcdc_priv *priv) | 518 | struct sh_mobile_lcdc_priv *priv) |
418 | { | 519 | { |
520 | #ifdef CONFIG_HAVE_CLK | ||
521 | char clk_name[8]; | ||
522 | #endif | ||
419 | char *str; | 523 | char *str; |
420 | int icksel; | 524 | int icksel; |
421 | 525 | ||
@@ -430,14 +534,21 @@ static int sh_mobile_lcdc_setup_clocks(struct device *dev, int clock_source, | |||
430 | priv->lddckr = icksel << 16; | 534 | priv->lddckr = icksel << 16; |
431 | 535 | ||
432 | #ifdef CONFIG_HAVE_CLK | 536 | #ifdef CONFIG_HAVE_CLK |
537 | atomic_set(&priv->clk_usecnt, -1); | ||
538 | snprintf(clk_name, sizeof(clk_name), "lcdc%d", pdev->id); | ||
539 | priv->clk = clk_get(&pdev->dev, clk_name); | ||
540 | if (IS_ERR(priv->clk)) { | ||
541 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
542 | return PTR_ERR(priv->clk); | ||
543 | } | ||
544 | |||
433 | if (str) { | 545 | if (str) { |
434 | priv->clk = clk_get(dev, str); | 546 | priv->dot_clk = clk_get(&pdev->dev, str); |
435 | if (IS_ERR(priv->clk)) { | 547 | if (IS_ERR(priv->dot_clk)) { |
436 | dev_err(dev, "cannot get clock %s\n", str); | 548 | dev_err(&pdev->dev, "cannot get dot clock %s\n", str); |
437 | return PTR_ERR(priv->clk); | 549 | clk_put(priv->clk); |
550 | return PTR_ERR(priv->dot_clk); | ||
438 | } | 551 | } |
439 | |||
440 | clk_enable(priv->clk); | ||
441 | } | 552 | } |
442 | #endif | 553 | #endif |
443 | 554 | ||
@@ -475,11 +586,34 @@ static struct fb_fix_screeninfo sh_mobile_lcdc_fix = { | |||
475 | .accel = FB_ACCEL_NONE, | 586 | .accel = FB_ACCEL_NONE, |
476 | }; | 587 | }; |
477 | 588 | ||
589 | static void sh_mobile_lcdc_fillrect(struct fb_info *info, | ||
590 | const struct fb_fillrect *rect) | ||
591 | { | ||
592 | sys_fillrect(info, rect); | ||
593 | sh_mobile_lcdc_deferred_io_touch(info); | ||
594 | } | ||
595 | |||
596 | static void sh_mobile_lcdc_copyarea(struct fb_info *info, | ||
597 | const struct fb_copyarea *area) | ||
598 | { | ||
599 | sys_copyarea(info, area); | ||
600 | sh_mobile_lcdc_deferred_io_touch(info); | ||
601 | } | ||
602 | |||
603 | static void sh_mobile_lcdc_imageblit(struct fb_info *info, | ||
604 | const struct fb_image *image) | ||
605 | { | ||
606 | sys_imageblit(info, image); | ||
607 | sh_mobile_lcdc_deferred_io_touch(info); | ||
608 | } | ||
609 | |||
478 | static struct fb_ops sh_mobile_lcdc_ops = { | 610 | static struct fb_ops sh_mobile_lcdc_ops = { |
479 | .fb_setcolreg = sh_mobile_lcdc_setcolreg, | 611 | .fb_setcolreg = sh_mobile_lcdc_setcolreg, |
480 | .fb_fillrect = cfb_fillrect, | 612 | .fb_read = fb_sys_read, |
481 | .fb_copyarea = cfb_copyarea, | 613 | .fb_write = fb_sys_write, |
482 | .fb_imageblit = cfb_imageblit, | 614 | .fb_fillrect = sh_mobile_lcdc_fillrect, |
615 | .fb_copyarea = sh_mobile_lcdc_copyarea, | ||
616 | .fb_imageblit = sh_mobile_lcdc_imageblit, | ||
483 | }; | 617 | }; |
484 | 618 | ||
485 | static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) | 619 | static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) |
@@ -540,8 +674,9 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
540 | } | 674 | } |
541 | 675 | ||
542 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 676 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
543 | if (res == NULL) { | 677 | i = platform_get_irq(pdev, 0); |
544 | dev_err(&pdev->dev, "cannot find IO resource\n"); | 678 | if (!res || i < 0) { |
679 | dev_err(&pdev->dev, "cannot get platform resources\n"); | ||
545 | error = -ENOENT; | 680 | error = -ENOENT; |
546 | goto err0; | 681 | goto err0; |
547 | } | 682 | } |
@@ -553,6 +688,14 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
553 | goto err0; | 688 | goto err0; |
554 | } | 689 | } |
555 | 690 | ||
691 | error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED, | ||
692 | pdev->dev.bus_id, priv); | ||
693 | if (error) { | ||
694 | dev_err(&pdev->dev, "unable to request irq\n"); | ||
695 | goto err1; | ||
696 | } | ||
697 | |||
698 | priv->irq = i; | ||
556 | platform_set_drvdata(pdev, priv); | 699 | platform_set_drvdata(pdev, priv); |
557 | pdata = pdev->dev.platform_data; | 700 | pdata = pdev->dev.platform_data; |
558 | 701 | ||
@@ -587,8 +730,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
587 | goto err1; | 730 | goto err1; |
588 | } | 731 | } |
589 | 732 | ||
590 | error = sh_mobile_lcdc_setup_clocks(&pdev->dev, | 733 | error = sh_mobile_lcdc_setup_clocks(pdev, pdata->clock_source, priv); |
591 | pdata->clock_source, priv); | ||
592 | if (error) { | 734 | if (error) { |
593 | dev_err(&pdev->dev, "unable to setup clocks\n"); | 735 | dev_err(&pdev->dev, "unable to setup clocks\n"); |
594 | goto err1; | 736 | goto err1; |
@@ -637,6 +779,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
637 | info->fix.smem_start = priv->ch[i].dma_handle; | 779 | info->fix.smem_start = priv->ch[i].dma_handle; |
638 | info->screen_base = buf; | 780 | info->screen_base = buf; |
639 | info->device = &pdev->dev; | 781 | info->device = &pdev->dev; |
782 | info->par = &priv->ch[i]; | ||
640 | } | 783 | } |
641 | 784 | ||
642 | if (error) | 785 | if (error) |
@@ -664,6 +807,10 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
664 | (int) priv->ch[i].cfg.lcd_cfg.xres, | 807 | (int) priv->ch[i].cfg.lcd_cfg.xres, |
665 | (int) priv->ch[i].cfg.lcd_cfg.yres, | 808 | (int) priv->ch[i].cfg.lcd_cfg.yres, |
666 | priv->ch[i].cfg.bpp); | 809 | priv->ch[i].cfg.bpp); |
810 | |||
811 | /* deferred io mode: disable clock to save power */ | ||
812 | if (info->fbdefio) | ||
813 | sh_mobile_lcdc_clk_off(priv); | ||
667 | } | 814 | } |
668 | 815 | ||
669 | return 0; | 816 | return 0; |
@@ -697,15 +844,16 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) | |||
697 | } | 844 | } |
698 | 845 | ||
699 | #ifdef CONFIG_HAVE_CLK | 846 | #ifdef CONFIG_HAVE_CLK |
700 | if (priv->clk) { | 847 | if (priv->dot_clk) |
701 | clk_disable(priv->clk); | 848 | clk_put(priv->dot_clk); |
702 | clk_put(priv->clk); | 849 | clk_put(priv->clk); |
703 | } | ||
704 | #endif | 850 | #endif |
705 | 851 | ||
706 | if (priv->base) | 852 | if (priv->base) |
707 | iounmap(priv->base); | 853 | iounmap(priv->base); |
708 | 854 | ||
855 | if (priv->irq) | ||
856 | free_irq(priv->irq, priv); | ||
709 | kfree(priv); | 857 | kfree(priv); |
710 | return 0; | 858 | return 0; |
711 | } | 859 | } |
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 0d15b0eaf79a..5139c25ca962 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c | |||
@@ -356,7 +356,9 @@ int w1_reset_select_slave(struct w1_slave *sl) | |||
356 | w1_write_8(sl->master, W1_SKIP_ROM); | 356 | w1_write_8(sl->master, W1_SKIP_ROM); |
357 | else { | 357 | else { |
358 | u8 match[9] = {W1_MATCH_ROM, }; | 358 | u8 match[9] = {W1_MATCH_ROM, }; |
359 | memcpy(&match[1], (u8 *)&sl->reg_num, 8); | 359 | u64 rn = le64_to_cpu(*((u64*)&sl->reg_num)); |
360 | |||
361 | memcpy(&match[1], &rn, 8); | ||
360 | w1_write_block(sl->master, match, 9); | 362 | w1_write_block(sl->master, match, 9); |
361 | } | 363 | } |
362 | return 0; | 364 | return 0; |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 526c191e84ea..8dc7109d61b7 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -44,13 +44,15 @@ | |||
44 | #include <linux/list.h> | 44 | #include <linux/list.h> |
45 | #include <linux/sysdev.h> | 45 | #include <linux/sysdev.h> |
46 | 46 | ||
47 | #include <asm/xen/hypervisor.h> | ||
48 | #include <asm/page.h> | 47 | #include <asm/page.h> |
49 | #include <asm/pgalloc.h> | 48 | #include <asm/pgalloc.h> |
50 | #include <asm/pgtable.h> | 49 | #include <asm/pgtable.h> |
51 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
52 | #include <asm/tlb.h> | 51 | #include <asm/tlb.h> |
53 | 52 | ||
53 | #include <asm/xen/hypervisor.h> | ||
54 | #include <asm/xen/hypercall.h> | ||
55 | #include <xen/interface/xen.h> | ||
54 | #include <xen/interface/memory.h> | 56 | #include <xen/interface/memory.h> |
55 | #include <xen/xenbus.h> | 57 | #include <xen/xenbus.h> |
56 | #include <xen/features.h> | 58 | #include <xen/features.h> |
diff --git a/drivers/xen/features.c b/drivers/xen/features.c index 0707714e40d6..99eda169c779 100644 --- a/drivers/xen/features.c +++ b/drivers/xen/features.c | |||
@@ -8,7 +8,11 @@ | |||
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/cache.h> | 9 | #include <linux/cache.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <asm/xen/hypervisor.h> | 11 | |
12 | #include <asm/xen/hypercall.h> | ||
13 | |||
14 | #include <xen/interface/xen.h> | ||
15 | #include <xen/interface/version.h> | ||
12 | #include <xen/features.h> | 16 | #include <xen/features.h> |
13 | 17 | ||
14 | u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; | 18 | u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; |
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 06592b9da83c..7d8f531fb8e8 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <xen/interface/xen.h> | 40 | #include <xen/interface/xen.h> |
41 | #include <xen/page.h> | 41 | #include <xen/page.h> |
42 | #include <xen/grant_table.h> | 42 | #include <xen/grant_table.h> |
43 | #include <asm/xen/hypercall.h> | ||
43 | 44 | ||
44 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
45 | #include <asm/sync_bitops.h> | 46 | #include <asm/sync_bitops.h> |