Diffstat (limited to 'drivers')
128 files changed, 6053 insertions, 1687 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f42a03029b7c..91874e048552 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
| @@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/ | |||
| 10 | obj-$(CONFIG_PARISC) += parisc/ | 10 | obj-$(CONFIG_PARISC) += parisc/ |
| 11 | obj-$(CONFIG_RAPIDIO) += rapidio/ | 11 | obj-$(CONFIG_RAPIDIO) += rapidio/ |
| 12 | obj-y += video/ | 12 | obj-y += video/ |
| 13 | obj-y += idle/ | ||
| 13 | obj-$(CONFIG_ACPI) += acpi/ | 14 | obj-$(CONFIG_ACPI) += acpi/ |
| 14 | obj-$(CONFIG_SFI) += sfi/ | 15 | obj-$(CONFIG_SFI) += sfi/ |
| 15 | # PnP must come after ACPI since it will eventually need to check if acpi | 16 | # PnP must come after ACPI since it will eventually need to check if acpi |
| @@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/ | |||
| 91 | obj-y += lguest/ | 92 | obj-y += lguest/ |
| 92 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | 93 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ |
| 93 | obj-$(CONFIG_CPU_IDLE) += cpuidle/ | 94 | obj-$(CONFIG_CPU_IDLE) += cpuidle/ |
| 94 | obj-y += idle/ | ||
| 95 | obj-$(CONFIG_MMC) += mmc/ | 95 | obj-$(CONFIG_MMC) += mmc/ |
| 96 | obj-$(CONFIG_MEMSTICK) += memstick/ | 96 | obj-$(CONFIG_MEMSTICK) += memstick/ |
| 97 | obj-$(CONFIG_NEW_LEDS) += leds/ | 97 | obj-$(CONFIG_NEW_LEDS) += leds/ |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
| @@ -360,4 +360,13 @@ config ACPI_SBS | |||
| 360 | To compile this driver as a module, choose M here: | 360 | To compile this driver as a module, choose M here: |
| 361 | the modules will be called sbs and sbshc. | 361 | the modules will be called sbs and sbshc. |
| 362 | 362 | ||
| 363 | config ACPI_HED | ||
| 364 | tristate "Hardware Error Device" | ||
| 365 | help | ||
| 366 | This driver supports the Hardware Error Device (PNP0C33), | ||
| 367 | which is used to report hardware errors that are notified via | ||
| 368 | SCI, mainly corrected errors. | ||
| 369 | |||
| 370 | source "drivers/acpi/apei/Kconfig" | ||
| 371 | |||
| 363 | endif # ACPI | 372 | endif # ACPI |
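The PNP0C33 device itself only raises an SCI notification; interested drivers (GHES, below, selects ACPI_HED for exactly this) subscribe through a notifier chain. The following is a minimal sketch of such a consumer, assuming the register_acpi_hed_notifier()/unregister_acpi_hed_notifier() interface provided by the hed.c driver added elsewhere in this series; the example_* names are illustrative only.

#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/hed.h>

/* Called for every Hardware Error Device (PNP0C33) notification. */
static int example_hed_notify(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	/* Poll firmware-first error sources (e.g. GHES) here. */
	return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
	.notifier_call = example_hed_notify,
};

static int __init example_init(void)
{
	return register_acpi_hed_notifier(&example_hed_nb);
}

static void __exit example_exit(void)
{
	unregister_acpi_hed_notifier(&example_hed_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");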
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
| @@ -19,7 +19,7 @@ obj-y += acpi.o \ | |||
| 19 | 19 | ||
| 20 | # All the builtin files are in the "acpi." module_param namespace. | 20 | # All the builtin files are in the "acpi." module_param namespace. |
| 21 | acpi-y += osl.o utils.o reboot.o | 21 | acpi-y += osl.o utils.o reboot.o |
| 22 | acpi-y += hest.o | 22 | acpi-y += atomicio.o |
| 23 | 23 | ||
| 24 | # sleep related files | 24 | # sleep related files |
| 25 | acpi-y += wakeup.o | 25 | acpi-y += wakeup.o |
| @@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o | |||
| 59 | obj-$(CONFIG_ACPI_SBS) += sbshc.o | 59 | obj-$(CONFIG_ACPI_SBS) += sbshc.o |
| 60 | obj-$(CONFIG_ACPI_SBS) += sbs.o | 60 | obj-$(CONFIG_ACPI_SBS) += sbs.o |
| 61 | obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o | 61 | obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o |
| 62 | obj-$(CONFIG_ACPI_HED) += hed.o | ||
| 62 | 63 | ||
| 63 | # processor has its own "processor." module_param namespace | 64 | # processor has its own "processor." module_param namespace |
| 64 | processor-y := processor_driver.o processor_throttling.o | 65 | processor-y := processor_driver.o processor_throttling.o |
| @@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o | |||
| 66 | processor-$(CONFIG_CPU_FREQ) += processor_perflib.o | 67 | processor-$(CONFIG_CPU_FREQ) += processor_perflib.o |
| 67 | 68 | ||
| 68 | obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o | 69 | obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o |
| 70 | |||
| 71 | obj-$(CONFIG_ACPI_APEI) += apei/ | ||
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693b..d269a8f3329c 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
| @@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock); | |||
| 43 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) | 43 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) |
| 44 | #define CPUID5_ECX_INTERRUPT_BREAK (0x2) | 44 | #define CPUID5_ECX_INTERRUPT_BREAK (0x2) |
| 45 | static unsigned long power_saving_mwait_eax; | 45 | static unsigned long power_saving_mwait_eax; |
| 46 | |||
| 47 | static unsigned char tsc_detected_unstable; | ||
| 48 | static unsigned char tsc_marked_unstable; | ||
| 49 | |||
| 46 | static void power_saving_mwait_init(void) | 50 | static void power_saving_mwait_init(void) |
| 47 | { | 51 | { |
| 48 | unsigned int eax, ebx, ecx, edx; | 52 | unsigned int eax, ebx, ecx, edx; |
| @@ -87,8 +91,8 @@ static void power_saving_mwait_init(void) | |||
| 87 | 91 | ||
| 88 | /*FALL THROUGH*/ | 92 | /*FALL THROUGH*/ |
| 89 | default: | 93 | default: |
| 90 | /* TSC could halt in idle, so notify users */ | 94 | /* TSC could halt in idle */ |
| 91 | mark_tsc_unstable("TSC halts in idle"); | 95 | tsc_detected_unstable = 1; |
| 92 | } | 96 | } |
| 93 | #endif | 97 | #endif |
| 94 | } | 98 | } |
| @@ -168,16 +172,14 @@ static int power_saving_thread(void *data) | |||
| 168 | 172 | ||
| 169 | do_sleep = 0; | 173 | do_sleep = 0; |
| 170 | 174 | ||
| 171 | current_thread_info()->status &= ~TS_POLLING; | ||
| 172 | /* | ||
| 173 | * TS_POLLING-cleared state must be visible before we test | ||
| 174 | * NEED_RESCHED: | ||
| 175 | */ | ||
| 176 | smp_mb(); | ||
| 177 | |||
| 178 | expire_time = jiffies + HZ * (100 - idle_pct) / 100; | 175 | expire_time = jiffies + HZ * (100 - idle_pct) / 100; |
| 179 | 176 | ||
| 180 | while (!need_resched()) { | 177 | while (!need_resched()) { |
| 178 | if (tsc_detected_unstable && !tsc_marked_unstable) { | ||
| 179 | /* TSC could halt in idle, so notify users */ | ||
| 180 | mark_tsc_unstable("TSC halts in idle"); | ||
| 181 | tsc_marked_unstable = 1; | ||
| 182 | } | ||
| 181 | local_irq_disable(); | 183 | local_irq_disable(); |
| 182 | cpu = smp_processor_id(); | 184 | cpu = smp_processor_id(); |
| 183 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, | 185 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, |
| @@ -200,8 +202,6 @@ static int power_saving_thread(void *data) | |||
| 200 | } | 202 | } |
| 201 | } | 203 | } |
| 202 | 204 | ||
| 203 | current_thread_info()->status |= TS_POLLING; | ||
| 204 | |||
| 205 | /* | 205 | /* |
| 206 | * current sched_rt has threshold for rt task running time. | 206 | * current sched_rt has threshold for rt task running time. |
| 207 | * When a rt task uses 95% CPU time, the rt thread will be | 207 | * When a rt task uses 95% CPU time, the rt thread will be |
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7c7bbb4d402c..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
| @@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
| 69 | 69 | ||
| 70 | acpi_status acpi_enable(void) | 70 | acpi_status acpi_enable(void) |
| 71 | { | 71 | { |
| 72 | acpi_status status = AE_OK; | 72 | acpi_status status; |
| 73 | 73 | ||
| 74 | ACPI_FUNCTION_TRACE(acpi_enable); | 74 | ACPI_FUNCTION_TRACE(acpi_enable); |
| 75 | 75 | ||
| @@ -84,21 +84,30 @@ acpi_status acpi_enable(void) | |||
| 84 | if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { | 84 | if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { |
| 85 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | 85 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, |
| 86 | "System is already in ACPI mode\n")); | 86 | "System is already in ACPI mode\n")); |
| 87 | } else { | 87 | return_ACPI_STATUS(AE_OK); |
| 88 | /* Transition to ACPI mode */ | 88 | } |
| 89 | 89 | ||
| 90 | status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); | 90 | /* Transition to ACPI mode */ |
| 91 | if (ACPI_FAILURE(status)) { | ||
| 92 | ACPI_ERROR((AE_INFO, | ||
| 93 | "Could not transition to ACPI mode")); | ||
| 94 | return_ACPI_STATUS(status); | ||
| 95 | } | ||
| 96 | 91 | ||
| 97 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | 92 | status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); |
| 98 | "Transition to ACPI mode successful\n")); | 93 | if (ACPI_FAILURE(status)) { |
| 94 | ACPI_ERROR((AE_INFO, | ||
| 95 | "Could not transition to ACPI mode")); | ||
| 96 | return_ACPI_STATUS(status); | ||
| 99 | } | 97 | } |
| 100 | 98 | ||
| 101 | return_ACPI_STATUS(status); | 99 | /* Sanity check that transition succeeded */ |
| 100 | |||
| 101 | if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) { | ||
| 102 | ACPI_ERROR((AE_INFO, | ||
| 103 | "Hardware did not enter ACPI mode")); | ||
| 104 | return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); | ||
| 105 | } | ||
| 106 | |||
| 107 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
| 108 | "Transition to ACPI mode successful\n")); | ||
| 109 | |||
| 110 | return_ACPI_STATUS(AE_OK); | ||
| 102 | } | 111 | } |
| 103 | 112 | ||
| 104 | ACPI_EXPORT_SYMBOL(acpi_enable) | 113 | ACPI_EXPORT_SYMBOL(acpi_enable) |
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
| @@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode) | |||
| 63 | { | 63 | { |
| 64 | 64 | ||
| 65 | acpi_status status; | 65 | acpi_status status; |
| 66 | u32 retry; | ||
| 67 | 66 | ||
| 68 | ACPI_FUNCTION_TRACE(hw_set_mode); | 67 | ACPI_FUNCTION_TRACE(hw_set_mode); |
| 69 | 68 | ||
| @@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode) | |||
| 125 | return_ACPI_STATUS(status); | 124 | return_ACPI_STATUS(status); |
| 126 | } | 125 | } |
| 127 | 126 | ||
| 128 | /* | 127 | return_ACPI_STATUS(AE_OK); |
| 129 | * Some hardware takes a LONG time to switch modes. Give them 3 sec to | ||
| 130 | * do so, but allow faster systems to proceed more quickly. | ||
| 131 | */ | ||
| 132 | retry = 3000; | ||
| 133 | while (retry) { | ||
| 134 | if (acpi_hw_get_mode() == mode) { | ||
| 135 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
| 136 | "Mode %X successfully enabled\n", | ||
| 137 | mode)); | ||
| 138 | return_ACPI_STATUS(AE_OK); | ||
| 139 | } | ||
| 140 | acpi_os_stall(1000); | ||
| 141 | retry--; | ||
| 142 | } | ||
| 143 | |||
| 144 | ACPI_ERROR((AE_INFO, "Hardware did not change modes")); | ||
| 145 | return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); | ||
| 146 | } | 128 | } |
| 147 | 129 | ||
| 148 | /******************************************************************************* | 130 | /******************************************************************************* |
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
| @@ -0,0 +1,30 @@ | |||
| 1 | config ACPI_APEI | ||
| 2 | bool "ACPI Platform Error Interface (APEI)" | ||
| 3 | depends on X86 | ||
| 4 | help | ||
| 5 | APEI allows the platform to report errors (for example from | ||
| 6 | the chipset) to the operating system. This especially | ||
| 7 | improves NMI handling. In addition, it supports error | ||
| 8 | serialization and error injection. | ||
| 9 | |||
| 10 | config ACPI_APEI_GHES | ||
| 11 | tristate "APEI Generic Hardware Error Source" | ||
| 12 | depends on ACPI_APEI && X86 | ||
| 13 | select ACPI_HED | ||
| 14 | help | ||
| 15 | Generic Hardware Error Source provides a way to report | ||
| 16 | platform hardware errors (such as those from the chipset). It | ||
| 17 | works in so-called "Firmware First" mode, that is, hardware | ||
| 18 | errors are reported to the firmware first and then passed on | ||
| 19 | to Linux by the firmware. This way, non-standard hardware | ||
| 20 | error registers or non-standard hardware links can be checked | ||
| 21 | by the firmware to produce more valuable hardware error | ||
| 22 | information for Linux. | ||
| 23 | |||
| 24 | config ACPI_APEI_EINJ | ||
| 25 | tristate "APEI Error INJection (EINJ)" | ||
| 26 | depends on ACPI_APEI && DEBUG_FS | ||
| 27 | help | ||
| 28 | EINJ provides a hardware error injection mechanism. It is | ||
| 29 | mainly used for debugging and testing the other parts of | ||
| 30 | APEI and some other RAS features. | ||
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
| @@ -0,0 +1,5 @@ | |||
| 1 | obj-$(CONFIG_ACPI_APEI) += apei.o | ||
| 2 | obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o | ||
| 3 | obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o | ||
| 4 | |||
| 5 | apei-y := apei-base.o hest.o cper.o erst.o | ||
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..db3946e9c66b
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
| @@ -0,0 +1,593 @@ | |||
| 1 | /* | ||
| 2 | * apei-base.c - ACPI Platform Error Interface (APEI) supporting | ||
| 3 | * infrastructure | ||
| 4 | * | ||
| 5 | * APEI allows the platform to report errors (for example from the | ||
| 6 | * chipset) to the operating system. This especially improves NMI | ||
| 7 | * handling. In addition, it supports error serialization and error injection. | ||
| 8 | * | ||
| 9 | * For more information about APEI, please refer to ACPI Specification | ||
| 10 | * version 4.0, chapter 17. | ||
| 11 | * | ||
| 12 | * This file implements common functions used by more than one APEI | ||
| 13 | * table, including the interpreter framework for ERST and EINJ, and | ||
| 14 | * resource management for APEI registers. | ||
| 15 | * | ||
| 16 | * Copyright (C) 2009, Intel Corp. | ||
| 17 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 18 | * | ||
| 19 | * This program is free software; you can redistribute it and/or | ||
| 20 | * modify it under the terms of the GNU General Public License version | ||
| 21 | * 2 as published by the Free Software Foundation. | ||
| 22 | * | ||
| 23 | * This program is distributed in the hope that it will be useful, | ||
| 24 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 26 | * GNU General Public License for more details. | ||
| 27 | * | ||
| 28 | * You should have received a copy of the GNU General Public License | ||
| 29 | * along with this program; if not, write to the Free Software | ||
| 30 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 31 | */ | ||
| 32 | |||
| 33 | #include <linux/kernel.h> | ||
| 34 | #include <linux/module.h> | ||
| 35 | #include <linux/init.h> | ||
| 36 | #include <linux/acpi.h> | ||
| 37 | #include <linux/io.h> | ||
| 38 | #include <linux/kref.h> | ||
| 39 | #include <linux/rculist.h> | ||
| 40 | #include <linux/interrupt.h> | ||
| 41 | #include <linux/debugfs.h> | ||
| 42 | #include <acpi/atomicio.h> | ||
| 43 | |||
| 44 | #include "apei-internal.h" | ||
| 45 | |||
| 46 | #define APEI_PFX "APEI: " | ||
| 47 | |||
| 48 | /* | ||
| 49 | * APEI ERST (Error Record Serialization Table) and EINJ (Error | ||
| 50 | * INJection) interpreter framework. | ||
| 51 | */ | ||
| 52 | |||
| 53 | #define APEI_EXEC_PRESERVE_REGISTER 0x1 | ||
| 54 | |||
| 55 | void apei_exec_ctx_init(struct apei_exec_context *ctx, | ||
| 56 | struct apei_exec_ins_type *ins_table, | ||
| 57 | u32 instructions, | ||
| 58 | struct acpi_whea_header *action_table, | ||
| 59 | u32 entries) | ||
| 60 | { | ||
| 61 | ctx->ins_table = ins_table; | ||
| 62 | ctx->instructions = instructions; | ||
| 63 | ctx->action_table = action_table; | ||
| 64 | ctx->entries = entries; | ||
| 65 | } | ||
| 66 | EXPORT_SYMBOL_GPL(apei_exec_ctx_init); | ||
| 67 | |||
| 68 | int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) | ||
| 69 | { | ||
| 70 | int rc; | ||
| 71 | |||
| 72 | rc = acpi_atomic_read(val, &entry->register_region); | ||
| 73 | if (rc) | ||
| 74 | return rc; | ||
| 75 | *val >>= entry->register_region.bit_offset; | ||
| 76 | *val &= entry->mask; | ||
| 77 | |||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | int apei_exec_read_register(struct apei_exec_context *ctx, | ||
| 82 | struct acpi_whea_header *entry) | ||
| 83 | { | ||
| 84 | int rc; | ||
| 85 | u64 val = 0; | ||
| 86 | |||
| 87 | rc = __apei_exec_read_register(entry, &val); | ||
| 88 | if (rc) | ||
| 89 | return rc; | ||
| 90 | ctx->value = val; | ||
| 91 | |||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | EXPORT_SYMBOL_GPL(apei_exec_read_register); | ||
| 95 | |||
| 96 | int apei_exec_read_register_value(struct apei_exec_context *ctx, | ||
| 97 | struct acpi_whea_header *entry) | ||
| 98 | { | ||
| 99 | int rc; | ||
| 100 | |||
| 101 | rc = apei_exec_read_register(ctx, entry); | ||
| 102 | if (rc) | ||
| 103 | return rc; | ||
| 104 | ctx->value = (ctx->value == entry->value); | ||
| 105 | |||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | EXPORT_SYMBOL_GPL(apei_exec_read_register_value); | ||
| 109 | |||
| 110 | int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) | ||
| 111 | { | ||
| 112 | int rc; | ||
| 113 | |||
| 114 | val &= entry->mask; | ||
| 115 | val <<= entry->register_region.bit_offset; | ||
| 116 | if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { | ||
| 117 | u64 valr = 0; | ||
| 118 | rc = acpi_atomic_read(&valr, &entry->register_region); | ||
| 119 | if (rc) | ||
| 120 | return rc; | ||
| 121 | valr &= ~(entry->mask << entry->register_region.bit_offset); | ||
| 122 | val |= valr; | ||
| 123 | } | ||
| 124 | rc = acpi_atomic_write(val, &entry->register_region); | ||
| 125 | |||
| 126 | return rc; | ||
| 127 | } | ||
| 128 | |||
| 129 | int apei_exec_write_register(struct apei_exec_context *ctx, | ||
| 130 | struct acpi_whea_header *entry) | ||
| 131 | { | ||
| 132 | return __apei_exec_write_register(entry, ctx->value); | ||
| 133 | } | ||
| 134 | EXPORT_SYMBOL_GPL(apei_exec_write_register); | ||
| 135 | |||
| 136 | int apei_exec_write_register_value(struct apei_exec_context *ctx, | ||
| 137 | struct acpi_whea_header *entry) | ||
| 138 | { | ||
| 139 | int rc; | ||
| 140 | |||
| 141 | ctx->value = entry->value; | ||
| 142 | rc = apei_exec_write_register(ctx, entry); | ||
| 143 | |||
| 144 | return rc; | ||
| 145 | } | ||
| 146 | EXPORT_SYMBOL_GPL(apei_exec_write_register_value); | ||
| 147 | |||
| 148 | int apei_exec_noop(struct apei_exec_context *ctx, | ||
| 149 | struct acpi_whea_header *entry) | ||
| 150 | { | ||
| 151 | return 0; | ||
| 152 | } | ||
| 153 | EXPORT_SYMBOL_GPL(apei_exec_noop); | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Interpret the specified action. Go through the whole action table | ||
| 157 | * and execute all instructions that belong to the action. | ||
| 158 | */ | ||
| 159 | int apei_exec_run(struct apei_exec_context *ctx, u8 action) | ||
| 160 | { | ||
| 161 | int rc; | ||
| 162 | u32 i, ip; | ||
| 163 | struct acpi_whea_header *entry; | ||
| 164 | apei_exec_ins_func_t run; | ||
| 165 | |||
| 166 | ctx->ip = 0; | ||
| 167 | |||
| 168 | /* | ||
| 169 | * "ip" is the instruction pointer of the current instruction, while | ||
| 170 | * "ctx->ip" specifies the next instruction to be executed. An | ||
| 171 | * instruction's "run" function may change "ctx->ip" to | ||
| 172 | * implement "goto" semantics. | ||
| 173 | */ | ||
| 174 | rewind: | ||
| 175 | ip = 0; | ||
| 176 | for (i = 0; i < ctx->entries; i++) { | ||
| 177 | entry = &ctx->action_table[i]; | ||
| 178 | if (entry->action != action) | ||
| 179 | continue; | ||
| 180 | if (ip == ctx->ip) { | ||
| 181 | if (entry->instruction >= ctx->instructions || | ||
| 182 | !ctx->ins_table[entry->instruction].run) { | ||
| 183 | pr_warning(FW_WARN APEI_PFX | ||
| 184 | "Invalid action table, unknown instruction type: %d\n", | ||
| 185 | entry->instruction); | ||
| 186 | return -EINVAL; | ||
| 187 | } | ||
| 188 | run = ctx->ins_table[entry->instruction].run; | ||
| 189 | rc = run(ctx, entry); | ||
| 190 | if (rc < 0) | ||
| 191 | return rc; | ||
| 192 | else if (rc != APEI_EXEC_SET_IP) | ||
| 193 | ctx->ip++; | ||
| 194 | } | ||
| 195 | ip++; | ||
| 196 | if (ctx->ip < ip) | ||
| 197 | goto rewind; | ||
| 198 | } | ||
| 199 | |||
| 200 | return 0; | ||
| 201 | } | ||
| 202 | EXPORT_SYMBOL_GPL(apei_exec_run); | ||
| 203 | |||
| 204 | typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx, | ||
| 205 | struct acpi_whea_header *entry, | ||
| 206 | void *data); | ||
| 207 | |||
| 208 | static int apei_exec_for_each_entry(struct apei_exec_context *ctx, | ||
| 209 | apei_exec_entry_func_t func, | ||
| 210 | void *data, | ||
| 211 | int *end) | ||
| 212 | { | ||
| 213 | u8 ins; | ||
| 214 | int i, rc; | ||
| 215 | struct acpi_whea_header *entry; | ||
| 216 | struct apei_exec_ins_type *ins_table = ctx->ins_table; | ||
| 217 | |||
| 218 | for (i = 0; i < ctx->entries; i++) { | ||
| 219 | entry = ctx->action_table + i; | ||
| 220 | ins = entry->instruction; | ||
| 221 | if (end) | ||
| 222 | *end = i; | ||
| 223 | if (ins >= ctx->instructions || !ins_table[ins].run) { | ||
| 224 | pr_warning(FW_WARN APEI_PFX | ||
| 225 | "Invalid action table, unknown instruction type: %d\n", | ||
| 226 | ins); | ||
| 227 | return -EINVAL; | ||
| 228 | } | ||
| 229 | rc = func(ctx, entry, data); | ||
| 230 | if (rc) | ||
| 231 | return rc; | ||
| 232 | } | ||
| 233 | |||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | static int pre_map_gar_callback(struct apei_exec_context *ctx, | ||
| 238 | struct acpi_whea_header *entry, | ||
| 239 | void *data) | ||
| 240 | { | ||
| 241 | u8 ins = entry->instruction; | ||
| 242 | |||
| 243 | if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) | ||
| 244 | return acpi_pre_map_gar(&entry->register_region); | ||
| 245 | |||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | /* | ||
| 250 | * Pre-map all GARs in the action table to make it possible to access | ||
| 251 | * them in the NMI handler. | ||
| 252 | */ | ||
| 253 | int apei_exec_pre_map_gars(struct apei_exec_context *ctx) | ||
| 254 | { | ||
| 255 | int rc, end; | ||
| 256 | |||
| 257 | rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback, | ||
| 258 | NULL, &end); | ||
| 259 | if (rc) { | ||
| 260 | struct apei_exec_context ctx_unmap; | ||
| 261 | memcpy(&ctx_unmap, ctx, sizeof(*ctx)); | ||
| 262 | ctx_unmap.entries = end; | ||
| 263 | apei_exec_post_unmap_gars(&ctx_unmap); | ||
| 264 | } | ||
| 265 | |||
| 266 | return rc; | ||
| 267 | } | ||
| 268 | EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars); | ||
| 269 | |||
| 270 | static int post_unmap_gar_callback(struct apei_exec_context *ctx, | ||
| 271 | struct acpi_whea_header *entry, | ||
| 272 | void *data) | ||
| 273 | { | ||
| 274 | u8 ins = entry->instruction; | ||
| 275 | |||
| 276 | if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) | ||
| 277 | acpi_post_unmap_gar(&entry->register_region); | ||
| 278 | |||
| 279 | return 0; | ||
| 280 | } | ||
| 281 | |||
| 282 | /* Post-unmap all GARs in the action table. */ | ||
| 283 | int apei_exec_post_unmap_gars(struct apei_exec_context *ctx) | ||
| 284 | { | ||
| 285 | return apei_exec_for_each_entry(ctx, post_unmap_gar_callback, | ||
| 286 | NULL, NULL); | ||
| 287 | } | ||
| 288 | EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars); | ||
| 289 | |||
| 290 | /* | ||
| 291 | * Resource management for GARs in APEI | ||
| 292 | */ | ||
| 293 | struct apei_res { | ||
| 294 | struct list_head list; | ||
| 295 | unsigned long start; | ||
| 296 | unsigned long end; | ||
| 297 | }; | ||
| 298 | |||
| 299 | /* Collect all resources requested, to avoid conflict */ | ||
| 300 | struct apei_resources apei_resources_all = { | ||
| 301 | .iomem = LIST_HEAD_INIT(apei_resources_all.iomem), | ||
| 302 | .ioport = LIST_HEAD_INIT(apei_resources_all.ioport), | ||
| 303 | }; | ||
| 304 | |||
| 305 | static int apei_res_add(struct list_head *res_list, | ||
| 306 | unsigned long start, unsigned long size) | ||
| 307 | { | ||
| 308 | struct apei_res *res, *resn, *res_ins = NULL; | ||
| 309 | unsigned long end = start + size; | ||
| 310 | |||
| 311 | if (end <= start) | ||
| 312 | return 0; | ||
| 313 | repeat: | ||
| 314 | list_for_each_entry_safe(res, resn, res_list, list) { | ||
| 315 | if (res->start > end || res->end < start) | ||
| 316 | continue; | ||
| 317 | else if (end <= res->end && start >= res->start) { | ||
| 318 | kfree(res_ins); | ||
| 319 | return 0; | ||
| 320 | } | ||
| 321 | list_del(&res->list); | ||
| 322 | res->start = start = min(res->start, start); | ||
| 323 | res->end = end = max(res->end, end); | ||
| 324 | kfree(res_ins); | ||
| 325 | res_ins = res; | ||
| 326 | goto repeat; | ||
| 327 | } | ||
| 328 | |||
| 329 | if (res_ins) | ||
| 330 | list_add(&res_ins->list, res_list); | ||
| 331 | else { | ||
| 332 | res_ins = kmalloc(sizeof(*res), GFP_KERNEL); | ||
| 333 | if (!res_ins) | ||
| 334 | return -ENOMEM; | ||
| 335 | res_ins->start = start; | ||
| 336 | res_ins->end = end; | ||
| 337 | list_add(&res_ins->list, res_list); | ||
| 338 | } | ||
| 339 | |||
| 340 | return 0; | ||
| 341 | } | ||
| 342 | |||
| 343 | static int apei_res_sub(struct list_head *res_list1, | ||
| 344 | struct list_head *res_list2) | ||
| 345 | { | ||
| 346 | struct apei_res *res1, *resn1, *res2, *res; | ||
| 347 | res1 = list_entry(res_list1->next, struct apei_res, list); | ||
| 348 | resn1 = list_entry(res1->list.next, struct apei_res, list); | ||
| 349 | while (&res1->list != res_list1) { | ||
| 350 | list_for_each_entry(res2, res_list2, list) { | ||
| 351 | if (res1->start >= res2->end || | ||
| 352 | res1->end <= res2->start) | ||
| 353 | continue; | ||
| 354 | else if (res1->end <= res2->end && | ||
| 355 | res1->start >= res2->start) { | ||
| 356 | list_del(&res1->list); | ||
| 357 | kfree(res1); | ||
| 358 | break; | ||
| 359 | } else if (res1->end > res2->end && | ||
| 360 | res1->start < res2->start) { | ||
| 361 | res = kmalloc(sizeof(*res), GFP_KERNEL); | ||
| 362 | if (!res) | ||
| 363 | return -ENOMEM; | ||
| 364 | res->start = res2->end; | ||
| 365 | res->end = res1->end; | ||
| 366 | res1->end = res2->start; | ||
| 367 | list_add(&res->list, &res1->list); | ||
| 368 | resn1 = res; | ||
| 369 | } else { | ||
| 370 | if (res1->start < res2->start) | ||
| 371 | res1->end = res2->start; | ||
| 372 | else | ||
| 373 | res1->start = res2->end; | ||
| 374 | } | ||
| 375 | } | ||
| 376 | res1 = resn1; | ||
| 377 | resn1 = list_entry(resn1->list.next, struct apei_res, list); | ||
| 378 | } | ||
| 379 | |||
| 380 | return 0; | ||
| 381 | } | ||
| 382 | |||
| 383 | static void apei_res_clean(struct list_head *res_list) | ||
| 384 | { | ||
| 385 | struct apei_res *res, *resn; | ||
| 386 | |||
| 387 | list_for_each_entry_safe(res, resn, res_list, list) { | ||
| 388 | list_del(&res->list); | ||
| 389 | kfree(res); | ||
| 390 | } | ||
| 391 | } | ||
| 392 | |||
| 393 | void apei_resources_fini(struct apei_resources *resources) | ||
| 394 | { | ||
| 395 | apei_res_clean(&resources->iomem); | ||
| 396 | apei_res_clean(&resources->ioport); | ||
| 397 | } | ||
| 398 | EXPORT_SYMBOL_GPL(apei_resources_fini); | ||
| 399 | |||
| 400 | static int apei_resources_merge(struct apei_resources *resources1, | ||
| 401 | struct apei_resources *resources2) | ||
| 402 | { | ||
| 403 | int rc; | ||
| 404 | struct apei_res *res; | ||
| 405 | |||
| 406 | list_for_each_entry(res, &resources2->iomem, list) { | ||
| 407 | rc = apei_res_add(&resources1->iomem, res->start, | ||
| 408 | res->end - res->start); | ||
| 409 | if (rc) | ||
| 410 | return rc; | ||
| 411 | } | ||
| 412 | list_for_each_entry(res, &resources2->ioport, list) { | ||
| 413 | rc = apei_res_add(&resources1->ioport, res->start, | ||
| 414 | res->end - res->start); | ||
| 415 | if (rc) | ||
| 416 | return rc; | ||
| 417 | } | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | /* | ||
| 423 | * EINJ has two groups of GARs (EINJ table entry and trigger table | ||
| 424 | * entry), so common resources are subtracted from the trigger table | ||
| 425 | * resources before the second request. | ||
| 426 | */ | ||
| 427 | int apei_resources_sub(struct apei_resources *resources1, | ||
| 428 | struct apei_resources *resources2) | ||
| 429 | { | ||
| 430 | int rc; | ||
| 431 | |||
| 432 | rc = apei_res_sub(&resources1->iomem, &resources2->iomem); | ||
| 433 | if (rc) | ||
| 434 | return rc; | ||
| 435 | return apei_res_sub(&resources1->ioport, &resources2->ioport); | ||
| 436 | } | ||
| 437 | EXPORT_SYMBOL_GPL(apei_resources_sub); | ||
| 438 | |||
| 439 | /* | ||
| 440 | * The IO memory/port resource management mechanism is used to check | ||
| 441 | * whether the memory/port areas used by GARs conflict with normal | ||
| 442 | * memory or the IO memory/ports of devices. | ||
| 443 | */ | ||
| 444 | int apei_resources_request(struct apei_resources *resources, | ||
| 445 | const char *desc) | ||
| 446 | { | ||
| 447 | struct apei_res *res, *res_bak; | ||
| 448 | struct resource *r; | ||
| 449 | |||
| 450 | apei_resources_sub(resources, &apei_resources_all); | ||
| 451 | |||
| 452 | list_for_each_entry(res, &resources->iomem, list) { | ||
| 453 | r = request_mem_region(res->start, res->end - res->start, | ||
| 454 | desc); | ||
| 455 | if (!r) { | ||
| 456 | pr_err(APEI_PFX | ||
| 457 | "Can not request iomem region <%016llx-%016llx> for GARs.\n", | ||
| 458 | (unsigned long long)res->start, | ||
| 459 | (unsigned long long)res->end); | ||
| 460 | res_bak = res; | ||
| 461 | goto err_unmap_iomem; | ||
| 462 | } | ||
| 463 | } | ||
| 464 | |||
| 465 | list_for_each_entry(res, &resources->ioport, list) { | ||
| 466 | r = request_region(res->start, res->end - res->start, desc); | ||
| 467 | if (!r) { | ||
| 468 | pr_err(APEI_PFX | ||
| 469 | "Can not request ioport region <%016llx-%016llx> for GARs.\n", | ||
| 470 | (unsigned long long)res->start, | ||
| 471 | (unsigned long long)res->end); | ||
| 472 | res_bak = res; | ||
| 473 | goto err_unmap_ioport; | ||
| 474 | } | ||
| 475 | } | ||
| 476 | |||
| 477 | apei_resources_merge(&apei_resources_all, resources); | ||
| 478 | |||
| 479 | return 0; | ||
| 480 | err_unmap_ioport: | ||
| 481 | list_for_each_entry(res, &resources->ioport, list) { | ||
| 482 | if (res == res_bak) | ||
| 483 | break; | ||
| 484 | release_region(res->start, res->end - res->start); | ||
| 485 | } | ||
| 486 | res_bak = NULL; | ||
| 487 | err_unmap_iomem: | ||
| 488 | list_for_each_entry(res, &resources->iomem, list) { | ||
| 489 | if (res == res_bak) | ||
| 490 | break; | ||
| 491 | release_mem_region(res->start, res->end - res->start); | ||
| 492 | } | ||
| 493 | return -EINVAL; | ||
| 494 | } | ||
| 495 | EXPORT_SYMBOL_GPL(apei_resources_request); | ||
| 496 | |||
| 497 | void apei_resources_release(struct apei_resources *resources) | ||
| 498 | { | ||
| 499 | struct apei_res *res; | ||
| 500 | |||
| 501 | list_for_each_entry(res, &resources->iomem, list) | ||
| 502 | release_mem_region(res->start, res->end - res->start); | ||
| 503 | list_for_each_entry(res, &resources->ioport, list) | ||
| 504 | release_region(res->start, res->end - res->start); | ||
| 505 | |||
| 506 | apei_resources_sub(&apei_resources_all, resources); | ||
| 507 | } | ||
| 508 | EXPORT_SYMBOL_GPL(apei_resources_release); | ||
| 509 | |||
| 510 | static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr) | ||
| 511 | { | ||
| 512 | u32 width, space_id; | ||
| 513 | |||
| 514 | width = reg->bit_width; | ||
| 515 | space_id = reg->space_id; | ||
| 516 | /* Handle possible alignment issues */ | ||
| 517 | memcpy(paddr, ®->address, sizeof(*paddr)); | ||
| 518 | if (!*paddr) { | ||
| 519 | pr_warning(FW_BUG APEI_PFX | ||
| 520 | "Invalid physical address in GAR [0x%llx/%u/%u]\n", | ||
| 521 | *paddr, width, space_id); | ||
| 522 | return -EINVAL; | ||
| 523 | } | ||
| 524 | |||
| 525 | if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { | ||
| 526 | pr_warning(FW_BUG APEI_PFX | ||
| 527 | "Invalid bit width in GAR [0x%llx/%u/%u]\n", | ||
| 528 | *paddr, width, space_id); | ||
| 529 | return -EINVAL; | ||
| 530 | } | ||
| 531 | |||
| 532 | if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && | ||
| 533 | space_id != ACPI_ADR_SPACE_SYSTEM_IO) { | ||
| 534 | pr_warning(FW_BUG APEI_PFX | ||
| 535 | "Invalid address space type in GAR [0x%llx/%u/%u]\n", | ||
| 536 | *paddr, width, space_id); | ||
| 537 | return -EINVAL; | ||
| 538 | } | ||
| 539 | |||
| 540 | return 0; | ||
| 541 | } | ||
| 542 | |||
| 543 | static int collect_res_callback(struct apei_exec_context *ctx, | ||
| 544 | struct acpi_whea_header *entry, | ||
| 545 | void *data) | ||
| 546 | { | ||
| 547 | struct apei_resources *resources = data; | ||
| 548 | struct acpi_generic_address *reg = &entry->register_region; | ||
| 549 | u8 ins = entry->instruction; | ||
| 550 | u64 paddr; | ||
| 551 | int rc; | ||
| 552 | |||
| 553 | if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)) | ||
| 554 | return 0; | ||
| 555 | |||
| 556 | rc = apei_check_gar(reg, &paddr); | ||
| 557 | if (rc) | ||
| 558 | return rc; | ||
| 559 | |||
| 560 | switch (reg->space_id) { | ||
| 561 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | ||
| 562 | return apei_res_add(&resources->iomem, paddr, | ||
| 563 | reg->bit_width / 8); | ||
| 564 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
| 565 | return apei_res_add(&resources->ioport, paddr, | ||
| 566 | reg->bit_width / 8); | ||
| 567 | default: | ||
| 568 | return -EINVAL; | ||
| 569 | } | ||
| 570 | } | ||
| 571 | |||
| 572 | /* | ||
| 573 | * The same register may be used by multiple instructions in GARs, so | ||
| 574 | * resources are collected before they are requested. | ||
| 575 | */ | ||
| 576 | int apei_exec_collect_resources(struct apei_exec_context *ctx, | ||
| 577 | struct apei_resources *resources) | ||
| 578 | { | ||
| 579 | return apei_exec_for_each_entry(ctx, collect_res_callback, | ||
| 580 | resources, NULL); | ||
| 581 | } | ||
| 582 | EXPORT_SYMBOL_GPL(apei_exec_collect_resources); | ||
| 583 | |||
| 584 | struct dentry *apei_get_debugfs_dir(void) | ||
| 585 | { | ||
| 586 | static struct dentry *dapei; | ||
| 587 | |||
| 588 | if (!dapei) | ||
| 589 | dapei = debugfs_create_dir("apei", NULL); | ||
| 590 | |||
| 591 | return dapei; | ||
| 592 | } | ||
| 593 | EXPORT_SYMBOL_GPL(apei_get_debugfs_dir); | ||
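To make the interpreter contract concrete: a table-specific driver supplies an instruction table indexed by the instruction codes of its ACPI table, initializes a context with apei_exec_ctx_init(), and runs one action at a time. The sketch below follows the pattern the EINJ driver uses later in this patch; the instruction table is shortened, and run_example_action() is a hypothetical wrapper, not part of the patch.

static struct apei_exec_ins_type example_ins_type[] = {
	[ACPI_EINJ_READ_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run   = apei_exec_read_register,
	},
	[ACPI_EINJ_WRITE_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run   = apei_exec_write_register_value,
	},
	[ACPI_EINJ_NOOP] = {
		.flags = 0,
		.run   = apei_exec_noop,
	},
};

/* Run one action of the table and return its output value. */
static int run_example_action(struct acpi_whea_header *action_table,
			      u32 entries, u8 action, u64 *output)
{
	struct apei_exec_context ctx;
	int rc;

	apei_exec_ctx_init(&ctx, example_ins_type,
			   ARRAY_SIZE(example_ins_type),
			   action_table, entries);
	rc = apei_exec_run(&ctx, action);
	if (rc)
		return rc;
	*output = apei_exec_ctx_get_output(&ctx);
	return 0;
}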
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
| @@ -0,0 +1,114 @@ | |||
| 1 | /* | ||
| 2 | * apei-internal.h - ACPI Platform Error Interface internal | ||
| 3 | * definitions. | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef APEI_INTERNAL_H | ||
| 7 | #define APEI_INTERNAL_H | ||
| 8 | |||
| 9 | #include <linux/cper.h> | ||
| 10 | |||
| 11 | struct apei_exec_context; | ||
| 12 | |||
| 13 | typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx, | ||
| 14 | struct acpi_whea_header *entry); | ||
| 15 | |||
| 16 | #define APEI_EXEC_INS_ACCESS_REGISTER 0x0001 | ||
| 17 | |||
| 18 | struct apei_exec_ins_type { | ||
| 19 | u32 flags; | ||
| 20 | apei_exec_ins_func_t run; | ||
| 21 | }; | ||
| 22 | |||
| 23 | struct apei_exec_context { | ||
| 24 | u32 ip; | ||
| 25 | u64 value; | ||
| 26 | u64 var1; | ||
| 27 | u64 var2; | ||
| 28 | u64 src_base; | ||
| 29 | u64 dst_base; | ||
| 30 | struct apei_exec_ins_type *ins_table; | ||
| 31 | u32 instructions; | ||
| 32 | struct acpi_whea_header *action_table; | ||
| 33 | u32 entries; | ||
| 34 | }; | ||
| 35 | |||
| 36 | void apei_exec_ctx_init(struct apei_exec_context *ctx, | ||
| 37 | struct apei_exec_ins_type *ins_table, | ||
| 38 | u32 instructions, | ||
| 39 | struct acpi_whea_header *action_table, | ||
| 40 | u32 entries); | ||
| 41 | |||
| 42 | static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx, | ||
| 43 | u64 input) | ||
| 44 | { | ||
| 45 | ctx->value = input; | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx) | ||
| 49 | { | ||
| 50 | return ctx->value; | ||
| 51 | } | ||
| 52 | |||
| 53 | int apei_exec_run(struct apei_exec_context *ctx, u8 action); | ||
| 54 | |||
| 55 | /* Common instruction implementation */ | ||
| 56 | |||
| 57 | /* IP has been set in instruction function */ | ||
| 58 | #define APEI_EXEC_SET_IP 1 | ||
| 59 | |||
| 60 | int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val); | ||
| 61 | int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val); | ||
| 62 | int apei_exec_read_register(struct apei_exec_context *ctx, | ||
| 63 | struct acpi_whea_header *entry); | ||
| 64 | int apei_exec_read_register_value(struct apei_exec_context *ctx, | ||
| 65 | struct acpi_whea_header *entry); | ||
| 66 | int apei_exec_write_register(struct apei_exec_context *ctx, | ||
| 67 | struct acpi_whea_header *entry); | ||
| 68 | int apei_exec_write_register_value(struct apei_exec_context *ctx, | ||
| 69 | struct acpi_whea_header *entry); | ||
| 70 | int apei_exec_noop(struct apei_exec_context *ctx, | ||
| 71 | struct acpi_whea_header *entry); | ||
| 72 | int apei_exec_pre_map_gars(struct apei_exec_context *ctx); | ||
| 73 | int apei_exec_post_unmap_gars(struct apei_exec_context *ctx); | ||
| 74 | |||
| 75 | struct apei_resources { | ||
| 76 | struct list_head iomem; | ||
| 77 | struct list_head ioport; | ||
| 78 | }; | ||
| 79 | |||
| 80 | static inline void apei_resources_init(struct apei_resources *resources) | ||
| 81 | { | ||
| 82 | INIT_LIST_HEAD(&resources->iomem); | ||
| 83 | INIT_LIST_HEAD(&resources->ioport); | ||
| 84 | } | ||
| 85 | |||
| 86 | void apei_resources_fini(struct apei_resources *resources); | ||
| 87 | int apei_resources_sub(struct apei_resources *resources1, | ||
| 88 | struct apei_resources *resources2); | ||
| 89 | int apei_resources_request(struct apei_resources *resources, | ||
| 90 | const char *desc); | ||
| 91 | void apei_resources_release(struct apei_resources *resources); | ||
| 92 | int apei_exec_collect_resources(struct apei_exec_context *ctx, | ||
| 93 | struct apei_resources *resources); | ||
| 94 | |||
| 95 | struct dentry; | ||
| 96 | struct dentry *apei_get_debugfs_dir(void); | ||
| 97 | |||
| 98 | #define apei_estatus_for_each_section(estatus, section) \ | ||
| 99 | for (section = (struct acpi_hest_generic_data *)(estatus + 1); \ | ||
| 100 | (void *)section - (void *)estatus < estatus->data_length; \ | ||
| 101 | section = (void *)(section+1) + section->error_data_length) | ||
| 102 | |||
| 103 | static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus) | ||
| 104 | { | ||
| 105 | if (estatus->raw_data_length) | ||
| 106 | return estatus->raw_data_offset + \ | ||
| 107 | estatus->raw_data_length; | ||
| 108 | else | ||
| 109 | return sizeof(*estatus) + estatus->data_length; | ||
| 110 | } | ||
| 111 | |||
| 112 | int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); | ||
| 113 | int apei_estatus_check(const struct acpi_hest_generic_status *estatus); | ||
| 114 | #endif | ||
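The resource and mapping helpers declared above are meant to be used in a fixed order around the interpreter: collect the GARs referenced by the action table, request them, pre-map them so they can be touched from NMI context, and undo everything in reverse on teardown. A minimal sketch of that lifecycle follows, with the context assumed to be initialized as in the earlier interpreter example and example_resources kept around for teardown; the example_* names are illustrative only.

static struct apei_resources example_resources;

static int example_setup_gars(struct apei_exec_context *ctx)
{
	int rc;

	apei_resources_init(&example_resources);
	rc = apei_exec_collect_resources(ctx, &example_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&example_resources, "APEI example");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(ctx);
	if (rc)
		goto err_release;
	/* apei_exec_run(ctx, ...) may now be used, even from NMI context. */
	return 0;

err_release:
	apei_resources_release(&example_resources);
err_fini:
	apei_resources_fini(&example_resources);
	return rc;
}

static void example_teardown_gars(struct apei_exec_context *ctx)
{
	apei_exec_post_unmap_gars(ctx);
	apei_resources_release(&example_resources);
	apei_resources_fini(&example_resources);
}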
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
| @@ -0,0 +1,84 @@ | |||
| 1 | /* | ||
| 2 | * UEFI Common Platform Error Record (CPER) support | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010, Intel Corp. | ||
| 5 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 6 | * | ||
| 7 | * CPER is the format used to describe platform hardware errors in | ||
| 8 | * various APEI tables, such as ERST, BERT and HEST. | ||
| 9 | * | ||
| 10 | * For more information about CPER, please refer to Appendix N of UEFI | ||
| 11 | * Specification version 2.3. | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or | ||
| 14 | * modify it under the terms of the GNU General Public License version | ||
| 15 | * 2 as published by the Free Software Foundation. | ||
| 16 | * | ||
| 17 | * This program is distributed in the hope that it will be useful, | ||
| 18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | * GNU General Public License for more details. | ||
| 21 | * | ||
| 22 | * You should have received a copy of the GNU General Public License | ||
| 23 | * along with this program; if not, write to the Free Software | ||
| 24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/time.h> | ||
| 30 | #include <linux/cper.h> | ||
| 31 | #include <linux/acpi.h> | ||
| 32 | |||
| 33 | /* | ||
| 34 | * CPER record IDs need to be unique even across reboots, because a | ||
| 35 | * record ID is used as the index for ERST storage, and CPER records | ||
| 36 | * from multiple boots may co-exist in ERST. | ||
| 37 | */ | ||
| 38 | u64 cper_next_record_id(void) | ||
| 39 | { | ||
| 40 | static atomic64_t seq; | ||
| 41 | |||
| 42 | if (!atomic64_read(&seq)) | ||
| 43 | atomic64_set(&seq, ((u64)get_seconds()) << 32); | ||
| 44 | |||
| 45 | return atomic64_inc_return(&seq); | ||
| 46 | } | ||
| 47 | EXPORT_SYMBOL_GPL(cper_next_record_id); | ||
| 48 | |||
| 49 | int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) | ||
| 50 | { | ||
| 51 | if (estatus->data_length && | ||
| 52 | estatus->data_length < sizeof(struct acpi_hest_generic_data)) | ||
| 53 | return -EINVAL; | ||
| 54 | if (estatus->raw_data_length && | ||
| 55 | estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length) | ||
| 56 | return -EINVAL; | ||
| 57 | |||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | EXPORT_SYMBOL_GPL(apei_estatus_check_header); | ||
| 61 | |||
| 62 | int apei_estatus_check(const struct acpi_hest_generic_status *estatus) | ||
| 63 | { | ||
| 64 | struct acpi_hest_generic_data *gdata; | ||
| 65 | unsigned int data_len, gedata_len; | ||
| 66 | int rc; | ||
| 67 | |||
| 68 | rc = apei_estatus_check_header(estatus); | ||
| 69 | if (rc) | ||
| 70 | return rc; | ||
| 71 | data_len = estatus->data_length; | ||
| 72 | gdata = (struct acpi_hest_generic_data *)(estatus + 1); | ||
| 73 | while (data_len > sizeof(*gdata)) { | ||
| 74 | gedata_len = gdata->error_data_length; | ||
| 75 | if (gedata_len > data_len - sizeof(*gdata)) | ||
| 76 | return -EINVAL; | ||
| 77 | data_len -= gedata_len + sizeof(*gdata); | ||
| 78 | } | ||
| 79 | if (data_len) | ||
| 80 | return -EINVAL; | ||
| 81 | |||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | EXPORT_SYMBOL_GPL(apei_estatus_check); | ||
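Combined with apei_estatus_for_each_section() and apei_estatus_len() from apei-internal.h, these checks give the usual pattern for consuming a generic error status block. A minimal sketch, with purely illustrative log messages:

/* Walk a validated generic error status block section by section. */
static void example_print_estatus(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata;

	if (apei_estatus_check(estatus)) {
		pr_warning("Malformed error status block, length %u\n",
			   apei_estatus_len(estatus));
		return;
	}

	/* Visit each generic error data entry in the block. */
	apei_estatus_for_each_section(estatus, gdata) {
		pr_info("error section: severity %u, data length %u\n",
			gdata->error_severity, gdata->error_data_length);
	}
}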
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..465c885938ee
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
| @@ -0,0 +1,548 @@ | |||
| 1 | /* | ||
| 2 | * APEI Error INJection support | ||
| 3 | * | ||
| 4 | * EINJ provides a hardware error injection mechanism, which is useful | ||
| 5 | * for debugging and testing other APEI and RAS features. | ||
| 6 | * | ||
| 7 | * For more information about EINJ, please refer to ACPI Specification | ||
| 8 | * version 4.0, section 17.5. | ||
| 9 | * | ||
| 10 | * Copyright 2009-2010 Intel Corp. | ||
| 11 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or | ||
| 14 | * modify it under the terms of the GNU General Public License version | ||
| 15 | * 2 as published by the Free Software Foundation. | ||
| 16 | * | ||
| 17 | * This program is distributed in the hope that it will be useful, | ||
| 18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | * GNU General Public License for more details. | ||
| 21 | * | ||
| 22 | * You should have received a copy of the GNU General Public License | ||
| 23 | * along with this program; if not, write to the Free Software | ||
| 24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/io.h> | ||
| 31 | #include <linux/debugfs.h> | ||
| 32 | #include <linux/seq_file.h> | ||
| 33 | #include <linux/nmi.h> | ||
| 34 | #include <linux/delay.h> | ||
| 35 | #include <acpi/acpi.h> | ||
| 36 | |||
| 37 | #include "apei-internal.h" | ||
| 38 | |||
| 39 | #define EINJ_PFX "EINJ: " | ||
| 40 | |||
| 41 | #define SPIN_UNIT 100 /* 100ns */ | ||
| 42 | /* Firmware should respond within 1 millisecond */ | ||
| 43 | #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the | ||
| 47 | * EINJ table through an unpublished extension. Use with caution as | ||
| 48 | * most will ignore the parameter and make their own choice of address | ||
| 49 | * for error injection. | ||
| 50 | */ | ||
| 51 | struct einj_parameter { | ||
| 52 | u64 type; | ||
| 53 | u64 reserved1; | ||
| 54 | u64 reserved2; | ||
| 55 | u64 param1; | ||
| 56 | u64 param2; | ||
| 57 | }; | ||
| 58 | |||
| 59 | #define EINJ_OP_BUSY 0x1 | ||
| 60 | #define EINJ_STATUS_SUCCESS 0x0 | ||
| 61 | #define EINJ_STATUS_FAIL 0x1 | ||
| 62 | #define EINJ_STATUS_INVAL 0x2 | ||
| 63 | |||
| 64 | #define EINJ_TAB_ENTRY(tab) \ | ||
| 65 | ((struct acpi_whea_header *)((char *)(tab) + \ | ||
| 66 | sizeof(struct acpi_table_einj))) | ||
| 67 | |||
| 68 | static struct acpi_table_einj *einj_tab; | ||
| 69 | |||
| 70 | static struct apei_resources einj_resources; | ||
| 71 | |||
| 72 | static struct apei_exec_ins_type einj_ins_type[] = { | ||
| 73 | [ACPI_EINJ_READ_REGISTER] = { | ||
| 74 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 75 | .run = apei_exec_read_register, | ||
| 76 | }, | ||
| 77 | [ACPI_EINJ_READ_REGISTER_VALUE] = { | ||
| 78 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 79 | .run = apei_exec_read_register_value, | ||
| 80 | }, | ||
| 81 | [ACPI_EINJ_WRITE_REGISTER] = { | ||
| 82 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 83 | .run = apei_exec_write_register, | ||
| 84 | }, | ||
| 85 | [ACPI_EINJ_WRITE_REGISTER_VALUE] = { | ||
| 86 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 87 | .run = apei_exec_write_register_value, | ||
| 88 | }, | ||
| 89 | [ACPI_EINJ_NOOP] = { | ||
| 90 | .flags = 0, | ||
| 91 | .run = apei_exec_noop, | ||
| 92 | }, | ||
| 93 | }; | ||
| 94 | |||
| 95 | /* | ||
| 96 | * Prevent the EINJ interpreter from running concurrently, because the | ||
| 97 | * corresponding firmware implementation may not work properly when | ||
| 98 | * invoked simultaneously. | ||
| 99 | */ | ||
| 100 | static DEFINE_MUTEX(einj_mutex); | ||
| 101 | |||
| 102 | static struct einj_parameter *einj_param; | ||
| 103 | |||
| 104 | static void einj_exec_ctx_init(struct apei_exec_context *ctx) | ||
| 105 | { | ||
| 106 | apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), | ||
| 107 | EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int __einj_get_available_error_type(u32 *type) | ||
| 111 | { | ||
| 112 | struct apei_exec_context ctx; | ||
| 113 | int rc; | ||
| 114 | |||
| 115 | einj_exec_ctx_init(&ctx); | ||
| 116 | rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); | ||
| 117 | if (rc) | ||
| 118 | return rc; | ||
| 119 | *type = apei_exec_ctx_get_output(&ctx); | ||
| 120 | |||
| 121 | return 0; | ||
| 122 | } | ||
| 123 | |||
| 124 | /* Get error injection capabilities of the platform */ | ||
| 125 | static int einj_get_available_error_type(u32 *type) | ||
| 126 | { | ||
| 127 | int rc; | ||
| 128 | |||
| 129 | mutex_lock(&einj_mutex); | ||
| 130 | rc = __einj_get_available_error_type(type); | ||
| 131 | mutex_unlock(&einj_mutex); | ||
| 132 | |||
| 133 | return rc; | ||
| 134 | } | ||
| 135 | |||
| 136 | static int einj_timedout(u64 *t) | ||
| 137 | { | ||
| 138 | if ((s64)*t < SPIN_UNIT) { | ||
| 139 | pr_warning(FW_WARN EINJ_PFX | ||
| 140 | "Firmware does not respond in time\n"); | ||
| 141 | return 1; | ||
| 142 | } | ||
| 143 | *t -= SPIN_UNIT; | ||
| 144 | ndelay(SPIN_UNIT); | ||
| 145 | touch_nmi_watchdog(); | ||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 149 | static u64 einj_get_parameter_address(void) | ||
| 150 | { | ||
| 151 | int i; | ||
| 152 | u64 paddr = 0; | ||
| 153 | struct acpi_whea_header *entry; | ||
| 154 | |||
| 155 | entry = EINJ_TAB_ENTRY(einj_tab); | ||
| 156 | for (i = 0; i < einj_tab->entries; i++) { | ||
| 157 | if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && | ||
| 158 | entry->instruction == ACPI_EINJ_WRITE_REGISTER && | ||
| 159 | entry->register_region.space_id == | ||
| 160 | ACPI_ADR_SPACE_SYSTEM_MEMORY) | ||
| 161 | memcpy(&paddr, &entry->register_region.address, | ||
| 162 | sizeof(paddr)); | ||
| 163 | entry++; | ||
| 164 | } | ||
| 165 | |||
| 166 | return paddr; | ||
| 167 | } | ||
| 168 | |||
| 169 | /* Do a sanity check on the trigger table */ | ||
| 170 | static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) | ||
| 171 | { | ||
| 172 | if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) | ||
| 173 | return -EINVAL; | ||
| 174 | if (trigger_tab->table_size > PAGE_SIZE || | ||
| 175 | trigger_tab->table_size <= trigger_tab->header_size) | ||
| 176 | return -EINVAL; | ||
| 177 | if (trigger_tab->entry_count != | ||
| 178 | (trigger_tab->table_size - trigger_tab->header_size) / | ||
| 179 | sizeof(struct acpi_einj_entry)) | ||
| 180 | return -EINVAL; | ||
| 181 | |||
| 182 | return 0; | ||
| 183 | } | ||
| 184 | |||
| 185 | /* Execute instructions in trigger error action table */ | ||
| 186 | static int __einj_error_trigger(u64 trigger_paddr) | ||
| 187 | { | ||
| 188 | struct acpi_einj_trigger *trigger_tab = NULL; | ||
| 189 | struct apei_exec_context trigger_ctx; | ||
| 190 | struct apei_resources trigger_resources; | ||
| 191 | struct acpi_whea_header *trigger_entry; | ||
| 192 | struct resource *r; | ||
| 193 | u32 table_size; | ||
| 194 | int rc = -EIO; | ||
| 195 | |||
| 196 | r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), | ||
| 197 | "APEI EINJ Trigger Table"); | ||
| 198 | if (!r) { | ||
| 199 | pr_err(EINJ_PFX | ||
| 200 | "Can not request iomem region <%016llx-%016llx> for Trigger table.\n", | ||
| 201 | (unsigned long long)trigger_paddr, | ||
| 202 | (unsigned long long)trigger_paddr+sizeof(*trigger_tab)); | ||
| 203 | goto out; | ||
| 204 | } | ||
| 205 | trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); | ||
| 206 | if (!trigger_tab) { | ||
| 207 | pr_err(EINJ_PFX "Failed to map trigger table!\n"); | ||
| 208 | goto out_rel_header; | ||
| 209 | } | ||
| 210 | rc = einj_check_trigger_header(trigger_tab); | ||
| 211 | if (rc) { | ||
| 212 | pr_warning(FW_BUG EINJ_PFX | ||
| 213 | "The trigger error action table is invalid\n"); | ||
| 214 | goto out_rel_header; | ||
| 215 | } | ||
| 216 | rc = -EIO; | ||
| 217 | table_size = trigger_tab->table_size; | ||
| 218 | r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), | ||
| 219 | table_size - sizeof(*trigger_tab), | ||
| 220 | "APEI EINJ Trigger Table"); | ||
| 221 | if (!r) { | ||
| 222 | pr_err(EINJ_PFX | ||
| 223 | "Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n", | ||
| 224 | (unsigned long long)trigger_paddr+sizeof(*trigger_tab), | ||
| 225 | (unsigned long long)trigger_paddr + table_size); | ||
| 226 | goto out_rel_header; | ||
| 227 | } | ||
| 228 | iounmap(trigger_tab); | ||
| 229 | trigger_tab = ioremap_cache(trigger_paddr, table_size); | ||
| 230 | if (!trigger_tab) { | ||
| 231 | pr_err(EINJ_PFX "Failed to map trigger table!\n"); | ||
| 232 | goto out_rel_entry; | ||
| 233 | } | ||
| 234 | trigger_entry = (struct acpi_whea_header *) | ||
| 235 | ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); | ||
| 236 | apei_resources_init(&trigger_resources); | ||
| 237 | apei_exec_ctx_init(&trigger_ctx, einj_ins_type, | ||
| 238 | ARRAY_SIZE(einj_ins_type), | ||
| 239 | trigger_entry, trigger_tab->entry_count); | ||
| 240 | rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); | ||
| 241 | if (rc) | ||
| 242 | goto out_fini; | ||
| 243 | rc = apei_resources_sub(&trigger_resources, &einj_resources); | ||
| 244 | if (rc) | ||
| 245 | goto out_fini; | ||
| 246 | rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); | ||
| 247 | if (rc) | ||
| 248 | goto out_fini; | ||
| 249 | rc = apei_exec_pre_map_gars(&trigger_ctx); | ||
| 250 | if (rc) | ||
| 251 | goto out_release; | ||
| 252 | |||
| 253 | rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); | ||
| 254 | |||
| 255 | apei_exec_post_unmap_gars(&trigger_ctx); | ||
| 256 | out_release: | ||
| 257 | apei_resources_release(&trigger_resources); | ||
| 258 | out_fini: | ||
| 259 | apei_resources_fini(&trigger_resources); | ||
| 260 | out_rel_entry: | ||
| 261 | release_mem_region(trigger_paddr + sizeof(*trigger_tab), | ||
| 262 | table_size - sizeof(*trigger_tab)); | ||
| 263 | out_rel_header: | ||
| 264 | release_mem_region(trigger_paddr, sizeof(*trigger_tab)); | ||
| 265 | out: | ||
| 266 | if (trigger_tab) | ||
| 267 | iounmap(trigger_tab); | ||
| 268 | |||
| 269 | return rc; | ||
| 270 | } | ||
| 271 | |||
| 272 | static int __einj_error_inject(u32 type, u64 param1, u64 param2) | ||
| 273 | { | ||
| 274 | struct apei_exec_context ctx; | ||
| 275 | u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; | ||
| 276 | int rc; | ||
| 277 | |||
| 278 | einj_exec_ctx_init(&ctx); | ||
| 279 | |||
| 280 | rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION); | ||
| 281 | if (rc) | ||
| 282 | return rc; | ||
| 283 | apei_exec_ctx_set_input(&ctx, type); | ||
| 284 | rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); | ||
| 285 | if (rc) | ||
| 286 | return rc; | ||
| 287 | if (einj_param) { | ||
| 288 | writeq(param1, &einj_param->param1); | ||
| 289 | writeq(param2, &einj_param->param2); | ||
| 290 | } | ||
| 291 | rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); | ||
| 292 | if (rc) | ||
| 293 | return rc; | ||
| 294 | for (;;) { | ||
| 295 | rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); | ||
| 296 | if (rc) | ||
| 297 | return rc; | ||
| 298 | val = apei_exec_ctx_get_output(&ctx); | ||
| 299 | if (!(val & EINJ_OP_BUSY)) | ||
| 300 | break; | ||
| 301 | if (einj_timedout(&timeout)) | ||
| 302 | return -EIO; | ||
| 303 | } | ||
| 304 | rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); | ||
| 305 | if (rc) | ||
| 306 | return rc; | ||
| 307 | val = apei_exec_ctx_get_output(&ctx); | ||
| 308 | if (val != EINJ_STATUS_SUCCESS) | ||
| 309 | return -EBUSY; | ||
| 310 | |||
| 311 | rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); | ||
| 312 | if (rc) | ||
| 313 | return rc; | ||
| 314 | trigger_paddr = apei_exec_ctx_get_output(&ctx); | ||
| 315 | rc = __einj_error_trigger(trigger_paddr); | ||
| 316 | if (rc) | ||
| 317 | return rc; | ||
| 318 | rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION); | ||
| 319 | |||
| 320 | return rc; | ||
| 321 | } | ||
| 322 | |||
| 323 | /* Inject the specified hardware error */ | ||
| 324 | static int einj_error_inject(u32 type, u64 param1, u64 param2) | ||
| 325 | { | ||
| 326 | int rc; | ||
| 327 | |||
| 328 | mutex_lock(&einj_mutex); | ||
| 329 | rc = __einj_error_inject(type, param1, param2); | ||
| 330 | mutex_unlock(&einj_mutex); | ||
| 331 | |||
| 332 | return rc; | ||
| 333 | } | ||
| 334 | |||
| 335 | static u32 error_type; | ||
| 336 | static u64 error_param1; | ||
| 337 | static u64 error_param2; | ||
| 338 | static struct dentry *einj_debug_dir; | ||
| 339 | |||
| 340 | static int available_error_type_show(struct seq_file *m, void *v) | ||
| 341 | { | ||
| 342 | int rc; | ||
| 343 | u32 available_error_type = 0; | ||
| 344 | |||
| 345 | rc = einj_get_available_error_type(&available_error_type); | ||
| 346 | if (rc) | ||
| 347 | return rc; | ||
| 348 | if (available_error_type & 0x0001) | ||
| 349 | seq_printf(m, "0x00000001\tProcessor Correctable\n"); | ||
| 350 | if (available_error_type & 0x0002) | ||
| 351 | seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n"); | ||
| 352 | if (available_error_type & 0x0004) | ||
| 353 | seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n"); | ||
| 354 | if (available_error_type & 0x0008) | ||
| 355 | seq_printf(m, "0x00000008\tMemory Correctable\n"); | ||
| 356 | if (available_error_type & 0x0010) | ||
| 357 | seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n"); | ||
| 358 | if (available_error_type & 0x0020) | ||
| 359 | seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n"); | ||
| 360 | if (available_error_type & 0x0040) | ||
| 361 | seq_printf(m, "0x00000040\tPCI Express Correctable\n"); | ||
| 362 | if (available_error_type & 0x0080) | ||
| 363 | seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n"); | ||
| 364 | if (available_error_type & 0x0100) | ||
| 365 | seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n"); | ||
| 366 | if (available_error_type & 0x0200) | ||
| 367 | seq_printf(m, "0x00000200\tPlatform Correctable\n"); | ||
| 368 | if (available_error_type & 0x0400) | ||
| 369 | seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n"); | ||
| 370 | if (available_error_type & 0x0800) | ||
| 371 | seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n"); | ||
| 372 | |||
| 373 | return 0; | ||
| 374 | } | ||
| 375 | |||
| 376 | static int available_error_type_open(struct inode *inode, struct file *file) | ||
| 377 | { | ||
| 378 | return single_open(file, available_error_type_show, NULL); | ||
| 379 | } | ||
| 380 | |||
| 381 | static const struct file_operations available_error_type_fops = { | ||
| 382 | .open = available_error_type_open, | ||
| 383 | .read = seq_read, | ||
| 384 | .llseek = seq_lseek, | ||
| 385 | .release = single_release, | ||
| 386 | }; | ||
| 387 | |||
| 388 | static int error_type_get(void *data, u64 *val) | ||
| 389 | { | ||
| 390 | *val = error_type; | ||
| 391 | |||
| 392 | return 0; | ||
| 393 | } | ||
| 394 | |||
| 395 | static int error_type_set(void *data, u64 val) | ||
| 396 | { | ||
| 397 | int rc; | ||
| 398 | u32 available_error_type = 0; | ||
| 399 | |||
| 400 | /* Only one error type can be specified */ | ||
| 401 | if (val & (val - 1)) | ||
| 402 | return -EINVAL; | ||
| 403 | rc = einj_get_available_error_type(&available_error_type); | ||
| 404 | if (rc) | ||
| 405 | return rc; | ||
| 406 | if (!(val & available_error_type)) | ||
| 407 | return -EINVAL; | ||
| 408 | error_type = val; | ||
| 409 | |||
| 410 | return 0; | ||
| 411 | } | ||
| 412 | |||
| 413 | DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get, | ||
| 414 | error_type_set, "0x%llx\n"); | ||
| 415 | |||
| 416 | static int error_inject_set(void *data, u64 val) | ||
| 417 | { | ||
| 418 | if (!error_type) | ||
| 419 | return -EINVAL; | ||
| 420 | |||
| 421 | return einj_error_inject(error_type, error_param1, error_param2); | ||
| 422 | } | ||
| 423 | |||
| 424 | DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, | ||
| 425 | error_inject_set, "%llu\n"); | ||
| 426 | |||
| 427 | static int einj_check_table(struct acpi_table_einj *einj_tab) | ||
| 428 | { | ||
| 429 | if (einj_tab->header_length != sizeof(struct acpi_table_einj)) | ||
| 430 | return -EINVAL; | ||
| 431 | if (einj_tab->header.length < sizeof(struct acpi_table_einj)) | ||
| 432 | return -EINVAL; | ||
| 433 | if (einj_tab->entries != | ||
| 434 | (einj_tab->header.length - sizeof(struct acpi_table_einj)) / | ||
| 435 | sizeof(struct acpi_einj_entry)) | ||
| 436 | return -EINVAL; | ||
| 437 | |||
| 438 | return 0; | ||
| 439 | } | ||
| 440 | |||
| 441 | static int __init einj_init(void) | ||
| 442 | { | ||
| 443 | int rc; | ||
| 444 | u64 param_paddr; | ||
| 445 | acpi_status status; | ||
| 446 | struct dentry *fentry; | ||
| 447 | struct apei_exec_context ctx; | ||
| 448 | |||
| 449 | if (acpi_disabled) | ||
| 450 | return -ENODEV; | ||
| 451 | |||
| 452 | status = acpi_get_table(ACPI_SIG_EINJ, 0, | ||
| 453 | (struct acpi_table_header **)&einj_tab); | ||
| 454 | if (status == AE_NOT_FOUND) { | ||
| 455 | pr_info(EINJ_PFX "Table is not found!\n"); | ||
| 456 | return -ENODEV; | ||
| 457 | } else if (ACPI_FAILURE(status)) { | ||
| 458 | const char *msg = acpi_format_exception(status); | ||
| 459 | pr_err(EINJ_PFX "Failed to get table, %s\n", msg); | ||
| 460 | return -EINVAL; | ||
| 461 | } | ||
| 462 | |||
| 463 | rc = einj_check_table(einj_tab); | ||
| 464 | if (rc) { | ||
| 465 | pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n"); | ||
| 466 | return -EINVAL; | ||
| 467 | } | ||
| 468 | |||
| 469 | rc = -ENOMEM; | ||
| 470 | einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); | ||
| 471 | if (!einj_debug_dir) | ||
| 472 | goto err_cleanup; | ||
| 473 | fentry = debugfs_create_file("available_error_type", S_IRUSR, | ||
| 474 | einj_debug_dir, NULL, | ||
| 475 | &available_error_type_fops); | ||
| 476 | if (!fentry) | ||
| 477 | goto err_cleanup; | ||
| 478 | fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR, | ||
| 479 | einj_debug_dir, NULL, &error_type_fops); | ||
| 480 | if (!fentry) | ||
| 481 | goto err_cleanup; | ||
| 482 | fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, | ||
| 483 | einj_debug_dir, &error_param1); | ||
| 484 | if (!fentry) | ||
| 485 | goto err_cleanup; | ||
| 486 | fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, | ||
| 487 | einj_debug_dir, &error_param2); | ||
| 488 | if (!fentry) | ||
| 489 | goto err_cleanup; | ||
| 490 | fentry = debugfs_create_file("error_inject", S_IWUSR, | ||
| 491 | einj_debug_dir, NULL, &error_inject_fops); | ||
| 492 | if (!fentry) | ||
| 493 | goto err_cleanup; | ||
| 494 | |||
| 495 | apei_resources_init(&einj_resources); | ||
| 496 | einj_exec_ctx_init(&ctx); | ||
| 497 | rc = apei_exec_collect_resources(&ctx, &einj_resources); | ||
| 498 | if (rc) | ||
| 499 | goto err_fini; | ||
| 500 | rc = apei_resources_request(&einj_resources, "APEI EINJ"); | ||
| 501 | if (rc) | ||
| 502 | goto err_fini; | ||
| 503 | rc = apei_exec_pre_map_gars(&ctx); | ||
| 504 | if (rc) | ||
| 505 | goto err_release; | ||
| 506 | param_paddr = einj_get_parameter_address(); | ||
| 507 | if (param_paddr) { | ||
| 508 | einj_param = ioremap(param_paddr, sizeof(*einj_param)); | ||
| 509 | rc = -ENOMEM; | ||
| 510 | if (!einj_param) | ||
| 511 | goto err_unmap; | ||
| 512 | } | ||
| 513 | |||
| 514 | pr_info(EINJ_PFX "Error INJection is initialized.\n"); | ||
| 515 | |||
| 516 | return 0; | ||
| 517 | |||
| 518 | err_unmap: | ||
| 519 | apei_exec_post_unmap_gars(&ctx); | ||
| 520 | err_release: | ||
| 521 | apei_resources_release(&einj_resources); | ||
| 522 | err_fini: | ||
| 523 | apei_resources_fini(&einj_resources); | ||
| 524 | err_cleanup: | ||
| 525 | debugfs_remove_recursive(einj_debug_dir); | ||
| 526 | |||
| 527 | return rc; | ||
| 528 | } | ||
| 529 | |||
| 530 | static void __exit einj_exit(void) | ||
| 531 | { | ||
| 532 | struct apei_exec_context ctx; | ||
| 533 | |||
| 534 | if (einj_param) | ||
| 535 | iounmap(einj_param); | ||
| 536 | einj_exec_ctx_init(&ctx); | ||
| 537 | apei_exec_post_unmap_gars(&ctx); | ||
| 538 | apei_resources_release(&einj_resources); | ||
| 539 | apei_resources_fini(&einj_resources); | ||
| 540 | debugfs_remove_recursive(einj_debug_dir); | ||
| 541 | } | ||
| 542 | |||
| 543 | module_init(einj_init); | ||
| 544 | module_exit(einj_exit); | ||
| 545 | |||
| 546 | MODULE_AUTHOR("Huang Ying"); | ||
| 547 | MODULE_DESCRIPTION("APEI Error INJection support"); | ||
| 548 | MODULE_LICENSE("GPL"); | ||
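For orientation, the debugfs files created by einj_init() above (available_error_type, error_type, param1, param2, error_inject) form the whole user-visible interface of this module. The fragment below is a purely illustrative user-space sketch of driving it; the /sys/kernel/debug/apei/einj path is an assumption about where the directory returned by apei_get_debugfs_dir() ends up, so adjust it for the actual system.

    /* Hypothetical user-space sketch for the EINJ debugfs interface. */
    #include <stdio.h>
    #include <stdlib.h>

    /* Assumed location; depends on where debugfs is mounted. */
    #define EINJ_DIR "/sys/kernel/debug/apei/einj"

    static void write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    exit(EXIT_FAILURE);
            }
            fprintf(f, "%s\n", val);
            fclose(f);
    }

    int main(void)
    {
            /* 0x8 is "Memory Correctable" in available_error_type above. */
            write_str(EINJ_DIR "/error_type", "0x8");
            write_str(EINJ_DIR "/param1", "0x0");     /* meaning is platform-specific */
            write_str(EINJ_DIR "/param2", "0x0");
            write_str(EINJ_DIR "/error_inject", "1"); /* trigger the injection */
            return 0;
    }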
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c new file mode 100644 index 000000000000..2ebc39115507 --- /dev/null +++ b/drivers/acpi/apei/erst.c | |||
| @@ -0,0 +1,855 @@ | |||
| 1 | /* | ||
| 2 | * APEI Error Record Serialization Table support | ||
| 3 | * | ||
| 4 | * ERST is a way provided by APEI to save and retrieve hardware error | ||
| 5 | * information to and from a persistent store. | ||
| 6 | * | ||
| 7 | * For more information about ERST, please refer to ACPI Specification | ||
| 8 | * version 4.0, section 17.4. | ||
| 9 | * | ||
| 10 | * Copyright 2010 Intel Corp. | ||
| 11 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or | ||
| 14 | * modify it under the terms of the GNU General Public License version | ||
| 15 | * 2 as published by the Free Software Foundation. | ||
| 16 | * | ||
| 17 | * This program is distributed in the hope that it will be useful, | ||
| 18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | * GNU General Public License for more details. | ||
| 21 | * | ||
| 22 | * You should have received a copy of the GNU General Public License | ||
| 23 | * along with this program; if not, write to the Free Software | ||
| 24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/delay.h> | ||
| 31 | #include <linux/io.h> | ||
| 32 | #include <linux/acpi.h> | ||
| 33 | #include <linux/uaccess.h> | ||
| 34 | #include <linux/cper.h> | ||
| 35 | #include <linux/nmi.h> | ||
| 36 | #include <acpi/apei.h> | ||
| 37 | |||
| 38 | #include "apei-internal.h" | ||
| 39 | |||
| 40 | #define ERST_PFX "ERST: " | ||
| 41 | |||
| 42 | /* ERST command status */ | ||
| 43 | #define ERST_STATUS_SUCCESS 0x0 | ||
| 44 | #define ERST_STATUS_NOT_ENOUGH_SPACE 0x1 | ||
| 45 | #define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2 | ||
| 46 | #define ERST_STATUS_FAILED 0x3 | ||
| 47 | #define ERST_STATUS_RECORD_STORE_EMPTY 0x4 | ||
| 48 | #define ERST_STATUS_RECORD_NOT_FOUND 0x5 | ||
| 49 | |||
| 50 | #define ERST_TAB_ENTRY(tab) \ | ||
| 51 | ((struct acpi_whea_header *)((char *)(tab) + \ | ||
| 52 | sizeof(struct acpi_table_erst))) | ||
| 53 | |||
| 54 | #define SPIN_UNIT 100 /* 100ns */ | ||
| 55 | /* Firmware should respond within 1 millisecond */ | ||
| 56 | #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) | ||
| 57 | #define FIRMWARE_MAX_STALL 50 /* 50us */ | ||
| 58 | |||
| 59 | int erst_disable; | ||
| 60 | EXPORT_SYMBOL_GPL(erst_disable); | ||
| 61 | |||
| 62 | static struct acpi_table_erst *erst_tab; | ||
| 63 | |||
| 64 | /* ERST Error Log Address Range attributes */ | ||
| 65 | #define ERST_RANGE_RESERVED 0x0001 | ||
| 66 | #define ERST_RANGE_NVRAM 0x0002 | ||
| 67 | #define ERST_RANGE_SLOW 0x0004 | ||
| 68 | |||
| 69 | /* | ||
| 70 | * ERST Error Log Address Range, used as buffer for reading/writing | ||
| 71 | * error records. | ||
| 72 | */ | ||
| 73 | static struct erst_erange { | ||
| 74 | u64 base; | ||
| 75 | u64 size; | ||
| 76 | void __iomem *vaddr; | ||
| 77 | u32 attr; | ||
| 78 | } erst_erange; | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Prevent the ERST interpreter from running simultaneously, because | ||
| 82 | * the corresponding firmware implementation may not work properly | ||
| 83 | * when invoked simultaneously. | ||
| 84 | * | ||
| 85 | * It also provides exclusive access to the ERST Error Log | ||
| 86 | * Address Range. | ||
| 87 | */ | ||
| 88 | static DEFINE_SPINLOCK(erst_lock); | ||
| 89 | |||
| 90 | static inline int erst_errno(int command_status) | ||
| 91 | { | ||
| 92 | switch (command_status) { | ||
| 93 | case ERST_STATUS_SUCCESS: | ||
| 94 | return 0; | ||
| 95 | case ERST_STATUS_HARDWARE_NOT_AVAILABLE: | ||
| 96 | return -ENODEV; | ||
| 97 | case ERST_STATUS_NOT_ENOUGH_SPACE: | ||
| 98 | return -ENOSPC; | ||
| 99 | case ERST_STATUS_RECORD_STORE_EMPTY: | ||
| 100 | case ERST_STATUS_RECORD_NOT_FOUND: | ||
| 101 | return -ENOENT; | ||
| 102 | default: | ||
| 103 | return -EINVAL; | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | static int erst_timedout(u64 *t, u64 spin_unit) | ||
| 108 | { | ||
| 109 | if ((s64)*t < spin_unit) { | ||
| 110 | pr_warning(FW_WARN ERST_PFX | ||
| 111 | "Firmware does not respond in time\n"); | ||
| 112 | return 1; | ||
| 113 | } | ||
| 114 | *t -= spin_unit; | ||
| 115 | ndelay(spin_unit); | ||
| 116 | touch_nmi_watchdog(); | ||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | |||
| 120 | static int erst_exec_load_var1(struct apei_exec_context *ctx, | ||
| 121 | struct acpi_whea_header *entry) | ||
| 122 | { | ||
| 123 | return __apei_exec_read_register(entry, &ctx->var1); | ||
| 124 | } | ||
| 125 | |||
| 126 | static int erst_exec_load_var2(struct apei_exec_context *ctx, | ||
| 127 | struct acpi_whea_header *entry) | ||
| 128 | { | ||
| 129 | return __apei_exec_read_register(entry, &ctx->var2); | ||
| 130 | } | ||
| 131 | |||
| 132 | static int erst_exec_store_var1(struct apei_exec_context *ctx, | ||
| 133 | struct acpi_whea_header *entry) | ||
| 134 | { | ||
| 135 | return __apei_exec_write_register(entry, ctx->var1); | ||
| 136 | } | ||
| 137 | |||
| 138 | static int erst_exec_add(struct apei_exec_context *ctx, | ||
| 139 | struct acpi_whea_header *entry) | ||
| 140 | { | ||
| 141 | ctx->var1 += ctx->var2; | ||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | static int erst_exec_subtract(struct apei_exec_context *ctx, | ||
| 146 | struct acpi_whea_header *entry) | ||
| 147 | { | ||
| 148 | ctx->var1 -= ctx->var2; | ||
| 149 | return 0; | ||
| 150 | } | ||
| 151 | |||
| 152 | static int erst_exec_add_value(struct apei_exec_context *ctx, | ||
| 153 | struct acpi_whea_header *entry) | ||
| 154 | { | ||
| 155 | int rc; | ||
| 156 | u64 val; | ||
| 157 | |||
| 158 | rc = __apei_exec_read_register(entry, &val); | ||
| 159 | if (rc) | ||
| 160 | return rc; | ||
| 161 | val += ctx->value; | ||
| 162 | rc = __apei_exec_write_register(entry, val); | ||
| 163 | return rc; | ||
| 164 | } | ||
| 165 | |||
| 166 | static int erst_exec_subtract_value(struct apei_exec_context *ctx, | ||
| 167 | struct acpi_whea_header *entry) | ||
| 168 | { | ||
| 169 | int rc; | ||
| 170 | u64 val; | ||
| 171 | |||
| 172 | rc = __apei_exec_read_register(entry, &val); | ||
| 173 | if (rc) | ||
| 174 | return rc; | ||
| 175 | val -= ctx->value; | ||
| 176 | rc = __apei_exec_write_register(entry, val); | ||
| 177 | return rc; | ||
| 178 | } | ||
| 179 | |||
| 180 | static int erst_exec_stall(struct apei_exec_context *ctx, | ||
| 181 | struct acpi_whea_header *entry) | ||
| 182 | { | ||
| 183 | u64 stall_time; | ||
| 184 | |||
| 185 | if (ctx->value > FIRMWARE_MAX_STALL) { | ||
| 186 | if (!in_nmi()) | ||
| 187 | pr_warning(FW_WARN ERST_PFX | ||
| 188 | "Too long stall time for stall instruction: %llx.\n", | ||
| 189 | ctx->value); | ||
| 190 | stall_time = FIRMWARE_MAX_STALL; | ||
| 191 | } else | ||
| 192 | stall_time = ctx->value; | ||
| 193 | udelay(stall_time); | ||
| 194 | return 0; | ||
| 195 | } | ||
| 196 | |||
| 197 | static int erst_exec_stall_while_true(struct apei_exec_context *ctx, | ||
| 198 | struct acpi_whea_header *entry) | ||
| 199 | { | ||
| 200 | int rc; | ||
| 201 | u64 val; | ||
| 202 | u64 timeout = FIRMWARE_TIMEOUT; | ||
| 203 | u64 stall_time; | ||
| 204 | |||
| 205 | if (ctx->var1 > FIRMWARE_MAX_STALL) { | ||
| 206 | if (!in_nmi()) | ||
| 207 | pr_warning(FW_WARN ERST_PFX | ||
| 208 | "Too long stall time for stall while true instruction: %llx.\n", | ||
| 209 | ctx->var1); | ||
| 210 | stall_time = FIRMWARE_MAX_STALL; | ||
| 211 | } else | ||
| 212 | stall_time = ctx->var1; | ||
| 213 | |||
| 214 | for (;;) { | ||
| 215 | rc = __apei_exec_read_register(entry, &val); | ||
| 216 | if (rc) | ||
| 217 | return rc; | ||
| 218 | if (val != ctx->value) | ||
| 219 | break; | ||
| 220 | if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC)) | ||
| 221 | return -EIO; | ||
| 222 | } | ||
| 223 | return 0; | ||
| 224 | } | ||
| 225 | |||
| 226 | static int erst_exec_skip_next_instruction_if_true( | ||
| 227 | struct apei_exec_context *ctx, | ||
| 228 | struct acpi_whea_header *entry) | ||
| 229 | { | ||
| 230 | int rc; | ||
| 231 | u64 val; | ||
| 232 | |||
| 233 | rc = __apei_exec_read_register(entry, &val); | ||
| 234 | if (rc) | ||
| 235 | return rc; | ||
| 236 | if (val == ctx->value) { | ||
| 237 | ctx->ip += 2; | ||
| 238 | return APEI_EXEC_SET_IP; | ||
| 239 | } | ||
| 240 | |||
| 241 | return 0; | ||
| 242 | } | ||
| 243 | |||
| 244 | static int erst_exec_goto(struct apei_exec_context *ctx, | ||
| 245 | struct acpi_whea_header *entry) | ||
| 246 | { | ||
| 247 | ctx->ip = ctx->value; | ||
| 248 | return APEI_EXEC_SET_IP; | ||
| 249 | } | ||
| 250 | |||
| 251 | static int erst_exec_set_src_address_base(struct apei_exec_context *ctx, | ||
| 252 | struct acpi_whea_header *entry) | ||
| 253 | { | ||
| 254 | return __apei_exec_read_register(entry, &ctx->src_base); | ||
| 255 | } | ||
| 256 | |||
| 257 | static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx, | ||
| 258 | struct acpi_whea_header *entry) | ||
| 259 | { | ||
| 260 | return __apei_exec_read_register(entry, &ctx->dst_base); | ||
| 261 | } | ||
| 262 | |||
| 263 | static int erst_exec_move_data(struct apei_exec_context *ctx, | ||
| 264 | struct acpi_whea_header *entry) | ||
| 265 | { | ||
| 266 | int rc; | ||
| 267 | u64 offset; | ||
| 268 | |||
| 269 | rc = __apei_exec_read_register(entry, &offset); | ||
| 270 | if (rc) | ||
| 271 | return rc; | ||
| 272 | memmove((void *)ctx->dst_base + offset, | ||
| 273 | (void *)ctx->src_base + offset, | ||
| 274 | ctx->var2); | ||
| 275 | |||
| 276 | return 0; | ||
| 277 | } | ||
| 278 | |||
| 279 | static struct apei_exec_ins_type erst_ins_type[] = { | ||
| 280 | [ACPI_ERST_READ_REGISTER] = { | ||
| 281 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 282 | .run = apei_exec_read_register, | ||
| 283 | }, | ||
| 284 | [ACPI_ERST_READ_REGISTER_VALUE] = { | ||
| 285 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 286 | .run = apei_exec_read_register_value, | ||
| 287 | }, | ||
| 288 | [ACPI_ERST_WRITE_REGISTER] = { | ||
| 289 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 290 | .run = apei_exec_write_register, | ||
| 291 | }, | ||
| 292 | [ACPI_ERST_WRITE_REGISTER_VALUE] = { | ||
| 293 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 294 | .run = apei_exec_write_register_value, | ||
| 295 | }, | ||
| 296 | [ACPI_ERST_NOOP] = { | ||
| 297 | .flags = 0, | ||
| 298 | .run = apei_exec_noop, | ||
| 299 | }, | ||
| 300 | [ACPI_ERST_LOAD_VAR1] = { | ||
| 301 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 302 | .run = erst_exec_load_var1, | ||
| 303 | }, | ||
| 304 | [ACPI_ERST_LOAD_VAR2] = { | ||
| 305 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 306 | .run = erst_exec_load_var2, | ||
| 307 | }, | ||
| 308 | [ACPI_ERST_STORE_VAR1] = { | ||
| 309 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 310 | .run = erst_exec_store_var1, | ||
| 311 | }, | ||
| 312 | [ACPI_ERST_ADD] = { | ||
| 313 | .flags = 0, | ||
| 314 | .run = erst_exec_add, | ||
| 315 | }, | ||
| 316 | [ACPI_ERST_SUBTRACT] = { | ||
| 317 | .flags = 0, | ||
| 318 | .run = erst_exec_subtract, | ||
| 319 | }, | ||
| 320 | [ACPI_ERST_ADD_VALUE] = { | ||
| 321 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 322 | .run = erst_exec_add_value, | ||
| 323 | }, | ||
| 324 | [ACPI_ERST_SUBTRACT_VALUE] = { | ||
| 325 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 326 | .run = erst_exec_subtract_value, | ||
| 327 | }, | ||
| 328 | [ACPI_ERST_STALL] = { | ||
| 329 | .flags = 0, | ||
| 330 | .run = erst_exec_stall, | ||
| 331 | }, | ||
| 332 | [ACPI_ERST_STALL_WHILE_TRUE] = { | ||
| 333 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 334 | .run = erst_exec_stall_while_true, | ||
| 335 | }, | ||
| 336 | [ACPI_ERST_SKIP_NEXT_IF_TRUE] = { | ||
| 337 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 338 | .run = erst_exec_skip_next_instruction_if_true, | ||
| 339 | }, | ||
| 340 | [ACPI_ERST_GOTO] = { | ||
| 341 | .flags = 0, | ||
| 342 | .run = erst_exec_goto, | ||
| 343 | }, | ||
| 344 | [ACPI_ERST_SET_SRC_ADDRESS_BASE] = { | ||
| 345 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 346 | .run = erst_exec_set_src_address_base, | ||
| 347 | }, | ||
| 348 | [ACPI_ERST_SET_DST_ADDRESS_BASE] = { | ||
| 349 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 350 | .run = erst_exec_set_dst_address_base, | ||
| 351 | }, | ||
| 352 | [ACPI_ERST_MOVE_DATA] = { | ||
| 353 | .flags = APEI_EXEC_INS_ACCESS_REGISTER, | ||
| 354 | .run = erst_exec_move_data, | ||
| 355 | }, | ||
| 356 | }; | ||
| 357 | |||
| 358 | static inline void erst_exec_ctx_init(struct apei_exec_context *ctx) | ||
| 359 | { | ||
| 360 | apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type), | ||
| 361 | ERST_TAB_ENTRY(erst_tab), erst_tab->entries); | ||
| 362 | } | ||
| 363 | |||
| 364 | static int erst_get_erange(struct erst_erange *range) | ||
| 365 | { | ||
| 366 | struct apei_exec_context ctx; | ||
| 367 | int rc; | ||
| 368 | |||
| 369 | erst_exec_ctx_init(&ctx); | ||
| 370 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE); | ||
| 371 | if (rc) | ||
| 372 | return rc; | ||
| 373 | range->base = apei_exec_ctx_get_output(&ctx); | ||
| 374 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH); | ||
| 375 | if (rc) | ||
| 376 | return rc; | ||
| 377 | range->size = apei_exec_ctx_get_output(&ctx); | ||
| 378 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES); | ||
| 379 | if (rc) | ||
| 380 | return rc; | ||
| 381 | range->attr = apei_exec_ctx_get_output(&ctx); | ||
| 382 | |||
| 383 | return 0; | ||
| 384 | } | ||
| 385 | |||
| 386 | static ssize_t __erst_get_record_count(void) | ||
| 387 | { | ||
| 388 | struct apei_exec_context ctx; | ||
| 389 | int rc; | ||
| 390 | |||
| 391 | erst_exec_ctx_init(&ctx); | ||
| 392 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT); | ||
| 393 | if (rc) | ||
| 394 | return rc; | ||
| 395 | return apei_exec_ctx_get_output(&ctx); | ||
| 396 | } | ||
| 397 | |||
| 398 | ssize_t erst_get_record_count(void) | ||
| 399 | { | ||
| 400 | ssize_t count; | ||
| 401 | unsigned long flags; | ||
| 402 | |||
| 403 | if (erst_disable) | ||
| 404 | return -ENODEV; | ||
| 405 | |||
| 406 | spin_lock_irqsave(&erst_lock, flags); | ||
| 407 | count = __erst_get_record_count(); | ||
| 408 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 409 | |||
| 410 | return count; | ||
| 411 | } | ||
| 412 | EXPORT_SYMBOL_GPL(erst_get_record_count); | ||
| 413 | |||
| 414 | static int __erst_get_next_record_id(u64 *record_id) | ||
| 415 | { | ||
| 416 | struct apei_exec_context ctx; | ||
| 417 | int rc; | ||
| 418 | |||
| 419 | erst_exec_ctx_init(&ctx); | ||
| 420 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID); | ||
| 421 | if (rc) | ||
| 422 | return rc; | ||
| 423 | *record_id = apei_exec_ctx_get_output(&ctx); | ||
| 424 | |||
| 425 | return 0; | ||
| 426 | } | ||
| 427 | |||
| 428 | /* | ||
| 429 | * Get the record ID of an existing error record on the persistent | ||
| 430 | * storage. If there is no error record on the persistent storage, the | ||
| 431 | * returned record_id is APEI_ERST_INVALID_RECORD_ID. | ||
| 432 | */ | ||
| 433 | int erst_get_next_record_id(u64 *record_id) | ||
| 434 | { | ||
| 435 | int rc; | ||
| 436 | unsigned long flags; | ||
| 437 | |||
| 438 | if (erst_disable) | ||
| 439 | return -ENODEV; | ||
| 440 | |||
| 441 | spin_lock_irqsave(&erst_lock, flags); | ||
| 442 | rc = __erst_get_next_record_id(record_id); | ||
| 443 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 444 | |||
| 445 | return rc; | ||
| 446 | } | ||
| 447 | EXPORT_SYMBOL_GPL(erst_get_next_record_id); | ||
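As the comment above spells out, erst_get_next_record_id() returns record IDs until the store is empty, at which point it reports APEI_ERST_INVALID_RECORD_ID. A minimal kernel-side sketch of a hypothetical consumer built on that contract follows; it assumes each record is cleared once handled so the firmware moves on to the next one, and that the declarations come from <acpi/apei.h> as in this file.

    #include <linux/types.h>
    #include <acpi/apei.h>

    /* Hypothetical consumer: walk and clear every record in the ERST store. */
    static int example_drain_erst(void)
    {
            u64 record_id;
            int rc;

            for (;;) {
                    rc = erst_get_next_record_id(&record_id);
                    if (rc)
                            return rc;      /* e.g. -ENODEV when ERST is disabled */
                    if (record_id == APEI_ERST_INVALID_RECORD_ID)
                            return 0;       /* the store is empty */
                    /* ... read and process the record identified by record_id ... */
                    rc = erst_clear(record_id);     /* otherwise the same ID may repeat */
                    if (rc)
                            return rc;
            }
    }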
| 448 | |||
| 449 | static int __erst_write_to_storage(u64 offset) | ||
| 450 | { | ||
| 451 | struct apei_exec_context ctx; | ||
| 452 | u64 timeout = FIRMWARE_TIMEOUT; | ||
| 453 | u64 val; | ||
| 454 | int rc; | ||
| 455 | |||
| 456 | erst_exec_ctx_init(&ctx); | ||
| 457 | rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); | ||
| 458 | if (rc) | ||
| 459 | return rc; | ||
| 460 | apei_exec_ctx_set_input(&ctx, offset); | ||
| 461 | rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); | ||
| 462 | if (rc) | ||
| 463 | return rc; | ||
| 464 | rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); | ||
| 465 | if (rc) | ||
| 466 | return rc; | ||
| 467 | for (;;) { | ||
| 468 | rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); | ||
| 469 | if (rc) | ||
| 470 | return rc; | ||
| 471 | val = apei_exec_ctx_get_output(&ctx); | ||
| 472 | if (!val) | ||
| 473 | break; | ||
| 474 | if (erst_timedout(&timeout, SPIN_UNIT)) | ||
| 475 | return -EIO; | ||
| 476 | } | ||
| 477 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); | ||
| 478 | if (rc) | ||
| 479 | return rc; | ||
| 480 | val = apei_exec_ctx_get_output(&ctx); | ||
| 481 | rc = apei_exec_run(&ctx, ACPI_ERST_END); | ||
| 482 | if (rc) | ||
| 483 | return rc; | ||
| 484 | |||
| 485 | return erst_errno(val); | ||
| 486 | } | ||
| 487 | |||
| 488 | static int __erst_read_from_storage(u64 record_id, u64 offset) | ||
| 489 | { | ||
| 490 | struct apei_exec_context ctx; | ||
| 491 | u64 timeout = FIRMWARE_TIMEOUT; | ||
| 492 | u64 val; | ||
| 493 | int rc; | ||
| 494 | |||
| 495 | erst_exec_ctx_init(&ctx); | ||
| 496 | rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); | ||
| 497 | if (rc) | ||
| 498 | return rc; | ||
| 499 | apei_exec_ctx_set_input(&ctx, offset); | ||
| 500 | rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); | ||
| 501 | if (rc) | ||
| 502 | return rc; | ||
| 503 | apei_exec_ctx_set_input(&ctx, record_id); | ||
| 504 | rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); | ||
| 505 | if (rc) | ||
| 506 | return rc; | ||
| 507 | rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); | ||
| 508 | if (rc) | ||
| 509 | return rc; | ||
| 510 | for (;;) { | ||
| 511 | rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); | ||
| 512 | if (rc) | ||
| 513 | return rc; | ||
| 514 | val = apei_exec_ctx_get_output(&ctx); | ||
| 515 | if (!val) | ||
| 516 | break; | ||
| 517 | if (erst_timedout(&timeout, SPIN_UNIT)) | ||
| 518 | return -EIO; | ||
| 519 | } | ||
| 520 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); | ||
| 521 | if (rc) | ||
| 522 | return rc; | ||
| 523 | val = apei_exec_ctx_get_output(&ctx); | ||
| 524 | rc = apei_exec_run(&ctx, ACPI_ERST_END); | ||
| 525 | if (rc) | ||
| 526 | return rc; | ||
| 527 | |||
| 528 | return erst_errno(val); | ||
| 529 | } | ||
| 530 | |||
| 531 | static int __erst_clear_from_storage(u64 record_id) | ||
| 532 | { | ||
| 533 | struct apei_exec_context ctx; | ||
| 534 | u64 timeout = FIRMWARE_TIMEOUT; | ||
| 535 | u64 val; | ||
| 536 | int rc; | ||
| 537 | |||
| 538 | erst_exec_ctx_init(&ctx); | ||
| 539 | rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); | ||
| 540 | if (rc) | ||
| 541 | return rc; | ||
| 542 | apei_exec_ctx_set_input(&ctx, record_id); | ||
| 543 | rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); | ||
| 544 | if (rc) | ||
| 545 | return rc; | ||
| 546 | rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); | ||
| 547 | if (rc) | ||
| 548 | return rc; | ||
| 549 | for (;;) { | ||
| 550 | rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); | ||
| 551 | if (rc) | ||
| 552 | return rc; | ||
| 553 | val = apei_exec_ctx_get_output(&ctx); | ||
| 554 | if (!val) | ||
| 555 | break; | ||
| 556 | if (erst_timedout(&timeout, SPIN_UNIT)) | ||
| 557 | return -EIO; | ||
| 558 | } | ||
| 559 | rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); | ||
| 560 | if (rc) | ||
| 561 | return rc; | ||
| 562 | val = apei_exec_ctx_get_output(&ctx); | ||
| 563 | rc = apei_exec_run(&ctx, ACPI_ERST_END); | ||
| 564 | if (rc) | ||
| 565 | return rc; | ||
| 566 | |||
| 567 | return erst_errno(val); | ||
| 568 | } | ||
| 569 | |||
| 570 | /* NVRAM ERST Error Log Address Range is not supported yet */ | ||
| 571 | static void pr_unimpl_nvram(void) | ||
| 572 | { | ||
| 573 | if (printk_ratelimit()) | ||
| 574 | pr_warning(ERST_PFX | ||
| 575 | "NVRAM ERST Log Address Range is not implemented yet\n"); | ||
| 576 | } | ||
| 577 | |||
| 578 | static int __erst_write_to_nvram(const struct cper_record_header *record) | ||
| 579 | { | ||
| 580 | /* do not print message, because printk is not safe for NMI */ | ||
| 581 | return -ENOSYS; | ||
| 582 | } | ||
| 583 | |||
| 584 | static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset) | ||
| 585 | { | ||
| 586 | pr_unimpl_nvram(); | ||
| 587 | return -ENOSYS; | ||
| 588 | } | ||
| 589 | |||
| 590 | static int __erst_clear_from_nvram(u64 record_id) | ||
| 591 | { | ||
| 592 | pr_unimpl_nvram(); | ||
| 593 | return -ENOSYS; | ||
| 594 | } | ||
| 595 | |||
| 596 | int erst_write(const struct cper_record_header *record) | ||
| 597 | { | ||
| 598 | int rc; | ||
| 599 | unsigned long flags; | ||
| 600 | struct cper_record_header *rcd_erange; | ||
| 601 | |||
| 602 | if (erst_disable) | ||
| 603 | return -ENODEV; | ||
| 604 | |||
| 605 | if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE)) | ||
| 606 | return -EINVAL; | ||
| 607 | |||
| 608 | if (erst_erange.attr & ERST_RANGE_NVRAM) { | ||
| 609 | if (!spin_trylock_irqsave(&erst_lock, flags)) | ||
| 610 | return -EBUSY; | ||
| 611 | rc = __erst_write_to_nvram(record); | ||
| 612 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 613 | return rc; | ||
| 614 | } | ||
| 615 | |||
| 616 | if (record->record_length > erst_erange.size) | ||
| 617 | return -EINVAL; | ||
| 618 | |||
| 619 | if (!spin_trylock_irqsave(&erst_lock, flags)) | ||
| 620 | return -EBUSY; | ||
| 621 | memcpy(erst_erange.vaddr, record, record->record_length); | ||
| 622 | rcd_erange = erst_erange.vaddr; | ||
| 623 | /* signature for serialization system */ | ||
| 624 | memcpy(&rcd_erange->persistence_information, "ER", 2); | ||
| 625 | |||
| 626 | rc = __erst_write_to_storage(0); | ||
| 627 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 628 | |||
| 629 | return rc; | ||
| 630 | } | ||
| 631 | EXPORT_SYMBOL_GPL(erst_write); | ||
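erst_write() above only verifies the CPER signature and that the record fits into the Error Log Address Range; everything else in the record is the caller's responsibility. The fragment below is only a sketch of that contract for a hypothetical caller; a real record needs the remaining cper_record_header fields and at least one section, which are omitted here.

    #include <linux/string.h>
    #include <linux/cper.h>
    #include <acpi/apei.h>

    /* Hypothetical caller: persist a (mostly empty) CPER record via ERST. */
    static int example_write_record(void)
    {
            static struct cper_record_header rcd;   /* static: zero-initialized */

            memcpy(rcd.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
            rcd.record_length = sizeof(rcd);        /* must not exceed erst_erange.size */
            /* severity, timestamps, creator/notification IDs, sections... omitted */

            return erst_write(&rcd);        /* 0 on success, negative errno on failure */
    }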
| 632 | |||
| 633 | static int __erst_read_to_erange(u64 record_id, u64 *offset) | ||
| 634 | { | ||
| 635 | int rc; | ||
| 636 | |||
| 637 | if (erst_erange.attr & ERST_RANGE_NVRAM) | ||
| 638 | return __erst_read_to_erange_from_nvram( | ||
| 639 | record_id, offset); | ||
| 640 | |||
| 641 | rc = __erst_read_from_storage(record_id, 0); | ||
| 642 | if (rc) | ||
| 643 | return rc; | ||
| 644 | *offset = 0; | ||
| 645 | |||
| 646 | return 0; | ||
| 647 | } | ||
| 648 | |||
| 649 | static ssize_t __erst_read(u64 record_id, struct cper_record_header *record, | ||
| 650 | size_t buflen) | ||
| 651 | { | ||
| 652 | int rc; | ||
| 653 | u64 offset, len = 0; | ||
| 654 | struct cper_record_header *rcd_tmp; | ||
| 655 | |||
| 656 | rc = __erst_read_to_erange(record_id, &offset); | ||
| 657 | if (rc) | ||
| 658 | return rc; | ||
| 659 | rcd_tmp = erst_erange.vaddr + offset; | ||
| 660 | len = rcd_tmp->record_length; | ||
| 661 | if (len <= buflen) | ||
| 662 | memcpy(record, rcd_tmp, len); | ||
| 663 | |||
| 664 | return len; | ||
| 665 | } | ||
| 666 | |||
| 667 | /* | ||
| 668 | * If the return value > buflen, the buffer is not big enough; | ||
| 669 | * if the return value < 0, something went wrong; | ||
| 670 | * otherwise everything is OK and the return value is the record length. | ||
| 671 | */ | ||
| 672 | ssize_t erst_read(u64 record_id, struct cper_record_header *record, | ||
| 673 | size_t buflen) | ||
| 674 | { | ||
| 675 | ssize_t len; | ||
| 676 | unsigned long flags; | ||
| 677 | |||
| 678 | if (erst_disable) | ||
| 679 | return -ENODEV; | ||
| 680 | |||
| 681 | spin_lock_irqsave(&erst_lock, flags); | ||
| 682 | len = __erst_read(record_id, record, buflen); | ||
| 683 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 684 | return len; | ||
| 685 | } | ||
| 686 | EXPORT_SYMBOL_GPL(erst_read); | ||
| 687 | |||
| 688 | /* | ||
| 689 | * If the return value > buflen, the buffer is not big enough; | ||
| 690 | * if the return value == 0, there are no more records to read; | ||
| 691 | * if the return value < 0, something went wrong; | ||
| 692 | * otherwise everything is OK and the return value is the record length. | ||
| 693 | */ | ||
| 694 | ssize_t erst_read_next(struct cper_record_header *record, size_t buflen) | ||
| 695 | { | ||
| 696 | int rc; | ||
| 697 | ssize_t len; | ||
| 698 | unsigned long flags; | ||
| 699 | u64 record_id; | ||
| 700 | |||
| 701 | if (erst_disable) | ||
| 702 | return -ENODEV; | ||
| 703 | |||
| 704 | spin_lock_irqsave(&erst_lock, flags); | ||
| 705 | rc = __erst_get_next_record_id(&record_id); | ||
| 706 | if (rc) { | ||
| 707 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 708 | return rc; | ||
| 709 | } | ||
| 710 | /* no more record */ | ||
| 711 | if (record_id == APEI_ERST_INVALID_RECORD_ID) { | ||
| 712 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 713 | return 0; | ||
| 714 | } | ||
| 715 | |||
| 716 | len = __erst_read(record_id, record, buflen); | ||
| 717 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 718 | |||
| 719 | return len; | ||
| 720 | } | ||
| 721 | EXPORT_SYMBOL_GPL(erst_read_next); | ||
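The return-value convention documented above (a value larger than buflen means the buffer was too small) naturally leads to a grow-and-retry loop in callers. A hypothetical kernel-side sketch of such a caller, assuming the declarations from <acpi/apei.h>:

    #include <linux/slab.h>
    #include <linux/errno.h>
    #include <linux/cper.h>
    #include <acpi/apei.h>

    /* Hypothetical caller: fetch the next record, growing the buffer on demand. */
    static ssize_t example_read_next(struct cper_record_header **out)
    {
            size_t buflen = 4096;   /* arbitrary initial guess */
            struct cper_record_header *rcd;
            ssize_t len;

            for (;;) {
                    rcd = kmalloc(buflen, GFP_KERNEL);
                    if (!rcd)
                            return -ENOMEM;
                    len = erst_read_next(rcd, buflen);
                    if (len <= 0) {                 /* error, or no more records */
                            kfree(rcd);
                            return len;
                    }
                    if ((size_t)len <= buflen) {    /* complete record copied */
                            *out = rcd;
                            return len;
                    }
                    kfree(rcd);
                    buflen = len;   /* too small: retry with the reported length */
            }
    }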
| 722 | |||
| 723 | int erst_clear(u64 record_id) | ||
| 724 | { | ||
| 725 | int rc; | ||
| 726 | unsigned long flags; | ||
| 727 | |||
| 728 | if (erst_disable) | ||
| 729 | return -ENODEV; | ||
| 730 | |||
| 731 | spin_lock_irqsave(&erst_lock, flags); | ||
| 732 | if (erst_erange.attr & ERST_RANGE_NVRAM) | ||
| 733 | rc = __erst_clear_from_nvram(record_id); | ||
| 734 | else | ||
| 735 | rc = __erst_clear_from_storage(record_id); | ||
| 736 | spin_unlock_irqrestore(&erst_lock, flags); | ||
| 737 | |||
| 738 | return rc; | ||
| 739 | } | ||
| 740 | EXPORT_SYMBOL_GPL(erst_clear); | ||
| 741 | |||
| 742 | static int __init setup_erst_disable(char *str) | ||
| 743 | { | ||
| 744 | erst_disable = 1; | ||
| 745 | return 0; | ||
| 746 | } | ||
| 747 | |||
| 748 | __setup("erst_disable", setup_erst_disable); | ||
| 749 | |||
| 750 | static int erst_check_table(struct acpi_table_erst *erst_tab) | ||
| 751 | { | ||
| 752 | if (erst_tab->header_length != sizeof(struct acpi_table_erst)) | ||
| 753 | return -EINVAL; | ||
| 754 | if (erst_tab->header.length < sizeof(struct acpi_table_erst)) | ||
| 755 | return -EINVAL; | ||
| 756 | if (erst_tab->entries != | ||
| 757 | (erst_tab->header.length - sizeof(struct acpi_table_erst)) / | ||
| 758 | sizeof(struct acpi_erst_entry)) | ||
| 759 | return -EINVAL; | ||
| 760 | |||
| 761 | return 0; | ||
| 762 | } | ||
| 763 | |||
| 764 | static int __init erst_init(void) | ||
| 765 | { | ||
| 766 | int rc = 0; | ||
| 767 | acpi_status status; | ||
| 768 | struct apei_exec_context ctx; | ||
| 769 | struct apei_resources erst_resources; | ||
| 770 | struct resource *r; | ||
| 771 | |||
| 772 | if (acpi_disabled) | ||
| 773 | goto err; | ||
| 774 | |||
| 775 | if (erst_disable) { | ||
| 776 | pr_info(ERST_PFX | ||
| 777 | "Error Record Serialization Table (ERST) support is disabled.\n"); | ||
| 778 | goto err; | ||
| 779 | } | ||
| 780 | |||
| 781 | status = acpi_get_table(ACPI_SIG_ERST, 0, | ||
| 782 | (struct acpi_table_header **)&erst_tab); | ||
| 783 | if (status == AE_NOT_FOUND) { | ||
| 784 | pr_err(ERST_PFX "Table is not found!\n"); | ||
| 785 | goto err; | ||
| 786 | } else if (ACPI_FAILURE(status)) { | ||
| 787 | const char *msg = acpi_format_exception(status); | ||
| 788 | pr_err(ERST_PFX "Failed to get table, %s\n", msg); | ||
| 789 | rc = -EINVAL; | ||
| 790 | goto err; | ||
| 791 | } | ||
| 792 | |||
| 793 | rc = erst_check_table(erst_tab); | ||
| 794 | if (rc) { | ||
| 795 | pr_err(FW_BUG ERST_PFX "ERST table is invalid\n"); | ||
| 796 | goto err; | ||
| 797 | } | ||
| 798 | |||
| 799 | apei_resources_init(&erst_resources); | ||
| 800 | erst_exec_ctx_init(&ctx); | ||
| 801 | rc = apei_exec_collect_resources(&ctx, &erst_resources); | ||
| 802 | if (rc) | ||
| 803 | goto err_fini; | ||
| 804 | rc = apei_resources_request(&erst_resources, "APEI ERST"); | ||
| 805 | if (rc) | ||
| 806 | goto err_fini; | ||
| 807 | rc = apei_exec_pre_map_gars(&ctx); | ||
| 808 | if (rc) | ||
| 809 | goto err_release; | ||
| 810 | rc = erst_get_erange(&erst_erange); | ||
| 811 | if (rc) { | ||
| 812 | if (rc == -ENODEV) | ||
| 813 | pr_info(ERST_PFX | ||
| 814 | "The corresponding hardware device or firmware implementation " | ||
| 815 | "is not available.\n"); | ||
| 816 | else | ||
| 817 | pr_err(ERST_PFX | ||
| 818 | "Failed to get Error Log Address Range.\n"); | ||
| 819 | goto err_unmap_reg; | ||
| 820 | } | ||
| 821 | |||
| 822 | r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST"); | ||
| 823 | if (!r) { | ||
| 824 | pr_err(ERST_PFX | ||
| 825 | "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n", | ||
| 826 | (unsigned long long)erst_erange.base, | ||
| 827 | (unsigned long long)erst_erange.base + erst_erange.size); | ||
| 828 | rc = -EIO; | ||
| 829 | goto err_unmap_reg; | ||
| 830 | } | ||
| 831 | rc = -ENOMEM; | ||
| 832 | erst_erange.vaddr = ioremap_cache(erst_erange.base, | ||
| 833 | erst_erange.size); | ||
| 834 | if (!erst_erange.vaddr) | ||
| 835 | goto err_release_erange; | ||
| 836 | |||
| 837 | pr_info(ERST_PFX | ||
| 838 | "Error Record Serialization Table (ERST) support is initialized.\n"); | ||
| 839 | |||
| 840 | return 0; | ||
| 841 | |||
| 842 | err_release_erange: | ||
| 843 | release_mem_region(erst_erange.base, erst_erange.size); | ||
| 844 | err_unmap_reg: | ||
| 845 | apei_exec_post_unmap_gars(&ctx); | ||
| 846 | err_release: | ||
| 847 | apei_resources_release(&erst_resources); | ||
| 848 | err_fini: | ||
| 849 | apei_resources_fini(&erst_resources); | ||
| 850 | err: | ||
| 851 | erst_disable = 1; | ||
| 852 | return rc; | ||
| 853 | } | ||
| 854 | |||
| 855 | device_initcall(erst_init); | ||
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c new file mode 100644 index 000000000000..fd0cc016a099 --- /dev/null +++ b/drivers/acpi/apei/ghes.c | |||
| @@ -0,0 +1,427 @@ | |||
| 1 | /* | ||
| 2 | * APEI Generic Hardware Error Source support | ||
| 3 | * | ||
| 4 | * Generic Hardware Error Source provides a way to report platform | ||
| 5 | * hardware errors (such as those from the chipset). It works in the | ||
| 6 | * so-called "Firmware First" mode, that is, hardware errors are | ||
| 7 | * reported to firmware first, then reported to Linux by the firmware. | ||
| 8 | * This way, non-standard hardware error registers or non-standard | ||
| 9 | * hardware links can be checked by the firmware to produce more | ||
| 10 | * hardware error information for Linux. | ||
| 11 | * | ||
| 12 | * For more information about Generic Hardware Error Source, please | ||
| 13 | * refer to ACPI Specification version 4.0, section 17.3.2.6 | ||
| 14 | * | ||
| 15 | * For now, only the SCI notification type and memory errors are | ||
| 16 | * supported. More notification types and hardware error types will | ||
| 17 | * be added later. | ||
| 18 | * | ||
| 19 | * Copyright 2010 Intel Corp. | ||
| 20 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 21 | * | ||
| 22 | * This program is free software; you can redistribute it and/or | ||
| 23 | * modify it under the terms of the GNU General Public License version | ||
| 24 | * 2 as published by the Free Software Foundation; | ||
| 25 | * | ||
| 26 | * This program is distributed in the hope that it will be useful, | ||
| 27 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 28 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 29 | * GNU General Public License for more details. | ||
| 30 | * | ||
| 31 | * You should have received a copy of the GNU General Public License | ||
| 32 | * along with this program; if not, write to the Free Software | ||
| 33 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/kernel.h> | ||
| 37 | #include <linux/module.h> | ||
| 38 | #include <linux/init.h> | ||
| 39 | #include <linux/acpi.h> | ||
| 40 | #include <linux/io.h> | ||
| 41 | #include <linux/interrupt.h> | ||
| 42 | #include <linux/cper.h> | ||
| 43 | #include <linux/kdebug.h> | ||
| 44 | #include <acpi/apei.h> | ||
| 45 | #include <acpi/atomicio.h> | ||
| 46 | #include <acpi/hed.h> | ||
| 47 | #include <asm/mce.h> | ||
| 48 | |||
| 49 | #include "apei-internal.h" | ||
| 50 | |||
| 51 | #define GHES_PFX "GHES: " | ||
| 52 | |||
| 53 | #define GHES_ESTATUS_MAX_SIZE 65536 | ||
| 54 | |||
| 55 | /* | ||
| 56 | * One struct ghes is created for each generic hardware error | ||
| 57 | * source. | ||
| 58 | * | ||
| 59 | * It provides the context for the APEI hardware error timer/IRQ/SCI/NMI | ||
| 60 | * handler. The handler for one generic hardware error source is only | ||
| 61 | * triggered after the previous one is done, so the handler can use | ||
| 62 | * struct ghes without locking. | ||
| 63 | * | ||
| 64 | * estatus: memory buffer for error status block, allocated during | ||
| 65 | * HEST parsing. | ||
| 66 | */ | ||
| 67 | #define GHES_TO_CLEAR 0x0001 | ||
| 68 | |||
| 69 | struct ghes { | ||
| 70 | struct acpi_hest_generic *generic; | ||
| 71 | struct acpi_hest_generic_status *estatus; | ||
| 72 | struct list_head list; | ||
| 73 | u64 buffer_paddr; | ||
| 74 | unsigned long flags; | ||
| 75 | }; | ||
| 76 | |||
| 77 | /* | ||
| 78 | * Error source lists, one list for each notification method. The | ||
| 79 | * members in lists are struct ghes. | ||
| 80 | * | ||
| 81 | * List members are only added during HEST parsing and deleted during | ||
| 82 | * module_exit, that is, single-threaded, so no lock is needed for | ||
| 83 | * that. | ||
| 84 | * | ||
| 85 | * But mutual exclusion is needed between adding/deleting members | ||
| 86 | * and the timer/IRQ/SCI/NMI handlers, which may traverse the list; | ||
| 87 | * RCU is used for that. | ||
| 88 | */ | ||
| 89 | static LIST_HEAD(ghes_sci); | ||
| 90 | |||
| 91 | static struct ghes *ghes_new(struct acpi_hest_generic *generic) | ||
| 92 | { | ||
| 93 | struct ghes *ghes; | ||
| 94 | unsigned int error_block_length; | ||
| 95 | int rc; | ||
| 96 | |||
| 97 | ghes = kzalloc(sizeof(*ghes), GFP_KERNEL); | ||
| 98 | if (!ghes) | ||
| 99 | return ERR_PTR(-ENOMEM); | ||
| 100 | ghes->generic = generic; | ||
| 101 | INIT_LIST_HEAD(&ghes->list); | ||
| 102 | rc = acpi_pre_map_gar(&generic->error_status_address); | ||
| 103 | if (rc) | ||
| 104 | goto err_free; | ||
| 105 | error_block_length = generic->error_block_length; | ||
| 106 | if (error_block_length > GHES_ESTATUS_MAX_SIZE) { | ||
| 107 | pr_warning(FW_WARN GHES_PFX | ||
| 108 | "Error status block length is too long: %u for " | ||
| 109 | "generic hardware error source: %d.\n", | ||
| 110 | error_block_length, generic->header.source_id); | ||
| 111 | error_block_length = GHES_ESTATUS_MAX_SIZE; | ||
| 112 | } | ||
| 113 | ghes->estatus = kmalloc(error_block_length, GFP_KERNEL); | ||
| 114 | if (!ghes->estatus) { | ||
| 115 | rc = -ENOMEM; | ||
| 116 | goto err_unmap; | ||
| 117 | } | ||
| 118 | |||
| 119 | return ghes; | ||
| 120 | |||
| 121 | err_unmap: | ||
| 122 | acpi_post_unmap_gar(&generic->error_status_address); | ||
| 123 | err_free: | ||
| 124 | kfree(ghes); | ||
| 125 | return ERR_PTR(rc); | ||
| 126 | } | ||
| 127 | |||
| 128 | static void ghes_fini(struct ghes *ghes) | ||
| 129 | { | ||
| 130 | kfree(ghes->estatus); | ||
| 131 | acpi_post_unmap_gar(&ghes->generic->error_status_address); | ||
| 132 | } | ||
| 133 | |||
| 134 | enum { | ||
| 135 | GHES_SER_NO = 0x0, | ||
| 136 | GHES_SER_CORRECTED = 0x1, | ||
| 137 | GHES_SER_RECOVERABLE = 0x2, | ||
| 138 | GHES_SER_PANIC = 0x3, | ||
| 139 | }; | ||
| 140 | |||
| 141 | static inline int ghes_severity(int severity) | ||
| 142 | { | ||
| 143 | switch (severity) { | ||
| 144 | case CPER_SER_INFORMATIONAL: | ||
| 145 | return GHES_SER_NO; | ||
| 146 | case CPER_SER_CORRECTED: | ||
| 147 | return GHES_SER_CORRECTED; | ||
| 148 | case CPER_SER_RECOVERABLE: | ||
| 149 | return GHES_SER_RECOVERABLE; | ||
| 150 | case CPER_SER_FATAL: | ||
| 151 | return GHES_SER_PANIC; | ||
| 152 | default: | ||
| 153 | /* Unknown, go panic */ | ||
| 154 | return GHES_SER_PANIC; | ||
| 155 | } | ||
| 156 | } | ||
| 157 | |||
| 158 | /* The SCI handler runs in a work queue, so ioremap can be used here */ | ||
| 159 | static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, | ||
| 160 | int from_phys) | ||
| 161 | { | ||
| 162 | void *vaddr; | ||
| 163 | |||
| 164 | vaddr = ioremap_cache(paddr, len); | ||
| 165 | if (!vaddr) | ||
| 166 | return -ENOMEM; | ||
| 167 | if (from_phys) | ||
| 168 | memcpy(buffer, vaddr, len); | ||
| 169 | else | ||
| 170 | memcpy(vaddr, buffer, len); | ||
| 171 | iounmap(vaddr); | ||
| 172 | |||
| 173 | return 0; | ||
| 174 | } | ||
| 175 | |||
| 176 | static int ghes_read_estatus(struct ghes *ghes, int silent) | ||
| 177 | { | ||
| 178 | struct acpi_hest_generic *g = ghes->generic; | ||
| 179 | u64 buf_paddr; | ||
| 180 | u32 len; | ||
| 181 | int rc; | ||
| 182 | |||
| 183 | rc = acpi_atomic_read(&buf_paddr, &g->error_status_address); | ||
| 184 | if (rc) { | ||
| 185 | if (!silent && printk_ratelimit()) | ||
| 186 | pr_warning(FW_WARN GHES_PFX | ||
| 187 | "Failed to read error status block address for hardware error source: %d.\n", | ||
| 188 | g->header.source_id); | ||
| 189 | return -EIO; | ||
| 190 | } | ||
| 191 | if (!buf_paddr) | ||
| 192 | return -ENOENT; | ||
| 193 | |||
| 194 | rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, | ||
| 195 | sizeof(*ghes->estatus), 1); | ||
| 196 | if (rc) | ||
| 197 | return rc; | ||
| 198 | if (!ghes->estatus->block_status) | ||
| 199 | return -ENOENT; | ||
| 200 | |||
| 201 | ghes->buffer_paddr = buf_paddr; | ||
| 202 | ghes->flags |= GHES_TO_CLEAR; | ||
| 203 | |||
| 204 | rc = -EIO; | ||
| 205 | len = apei_estatus_len(ghes->estatus); | ||
| 206 | if (len < sizeof(*ghes->estatus)) | ||
| 207 | goto err_read_block; | ||
| 208 | if (len > ghes->generic->error_block_length) | ||
| 209 | goto err_read_block; | ||
| 210 | if (apei_estatus_check_header(ghes->estatus)) | ||
| 211 | goto err_read_block; | ||
| 212 | rc = ghes_copy_tofrom_phys(ghes->estatus + 1, | ||
| 213 | buf_paddr + sizeof(*ghes->estatus), | ||
| 214 | len - sizeof(*ghes->estatus), 1); | ||
| 215 | if (rc) | ||
| 216 | return rc; | ||
| 217 | if (apei_estatus_check(ghes->estatus)) | ||
| 218 | goto err_read_block; | ||
| 219 | rc = 0; | ||
| 220 | |||
| 221 | err_read_block: | ||
| 222 | if (rc && !silent) | ||
| 223 | pr_warning(FW_WARN GHES_PFX | ||
| 224 | "Failed to read error status block!\n"); | ||
| 225 | return rc; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void ghes_clear_estatus(struct ghes *ghes) | ||
| 229 | { | ||
| 230 | ghes->estatus->block_status = 0; | ||
| 231 | if (!(ghes->flags & GHES_TO_CLEAR)) | ||
| 232 | return; | ||
| 233 | ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr, | ||
| 234 | sizeof(ghes->estatus->block_status), 0); | ||
| 235 | ghes->flags &= ~GHES_TO_CLEAR; | ||
| 236 | } | ||
| 237 | |||
| 238 | static void ghes_do_proc(struct ghes *ghes) | ||
| 239 | { | ||
| 240 | int ser, processed = 0; | ||
| 241 | struct acpi_hest_generic_data *gdata; | ||
| 242 | |||
| 243 | ser = ghes_severity(ghes->estatus->error_severity); | ||
| 244 | apei_estatus_for_each_section(ghes->estatus, gdata) { | ||
| 245 | #ifdef CONFIG_X86_MCE | ||
| 246 | if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, | ||
| 247 | CPER_SEC_PLATFORM_MEM)) { | ||
| 248 | apei_mce_report_mem_error( | ||
| 249 | ser == GHES_SER_CORRECTED, | ||
| 250 | (struct cper_sec_mem_err *)(gdata+1)); | ||
| 251 | processed = 1; | ||
| 252 | } | ||
| 253 | #endif | ||
| 254 | } | ||
| 255 | |||
| 256 | if (!processed && printk_ratelimit()) | ||
| 257 | pr_warning(GHES_PFX | ||
| 258 | "Unknown error record from generic hardware error source: %d\n", | ||
| 259 | ghes->generic->header.source_id); | ||
| 260 | } | ||
| 261 | |||
| 262 | static int ghes_proc(struct ghes *ghes) | ||
| 263 | { | ||
| 264 | int rc; | ||
| 265 | |||
| 266 | rc = ghes_read_estatus(ghes, 0); | ||
| 267 | if (rc) | ||
| 268 | goto out; | ||
| 269 | ghes_do_proc(ghes); | ||
| 270 | |||
| 271 | out: | ||
| 272 | ghes_clear_estatus(ghes); | ||
| 273 | return 0; | ||
| 274 | } | ||
| 275 | |||
| 276 | static int ghes_notify_sci(struct notifier_block *this, | ||
| 277 | unsigned long event, void *data) | ||
| 278 | { | ||
| 279 | struct ghes *ghes; | ||
| 280 | int ret = NOTIFY_DONE; | ||
| 281 | |||
| 282 | rcu_read_lock(); | ||
| 283 | list_for_each_entry_rcu(ghes, &ghes_sci, list) { | ||
| 284 | if (!ghes_proc(ghes)) | ||
| 285 | ret = NOTIFY_OK; | ||
| 286 | } | ||
| 287 | rcu_read_unlock(); | ||
| 288 | |||
| 289 | return ret; | ||
| 290 | } | ||
| 291 | |||
| 292 | static struct notifier_block ghes_notifier_sci = { | ||
| 293 | .notifier_call = ghes_notify_sci, | ||
| 294 | }; | ||
| 295 | |||
| 296 | static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data) | ||
| 297 | { | ||
| 298 | struct acpi_hest_generic *generic; | ||
| 299 | struct ghes *ghes = NULL; | ||
| 300 | int rc = 0; | ||
| 301 | |||
| 302 | if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) | ||
| 303 | return 0; | ||
| 304 | |||
| 305 | generic = (struct acpi_hest_generic *)hest_hdr; | ||
| 306 | if (!generic->enabled) | ||
| 307 | return 0; | ||
| 308 | |||
| 309 | if (generic->error_block_length < | ||
| 310 | sizeof(struct acpi_hest_generic_status)) { | ||
| 311 | pr_warning(FW_BUG GHES_PFX | ||
| 312 | "Invalid error block length: %u for generic hardware error source: %d\n", | ||
| 313 | generic->error_block_length, | ||
| 314 | generic->header.source_id); | ||
| 315 | goto err; | ||
| 316 | } | ||
| 317 | if (generic->records_to_preallocate == 0) { | ||
| 318 | pr_warning(FW_BUG GHES_PFX | ||
| 319 | "Invalid records to preallocate: %u for generic hardware error source: %d\n", | ||
| 320 | generic->records_to_preallocate, | ||
| 321 | generic->header.source_id); | ||
| 322 | goto err; | ||
| 323 | } | ||
| 324 | ghes = ghes_new(generic); | ||
| 325 | if (IS_ERR(ghes)) { | ||
| 326 | rc = PTR_ERR(ghes); | ||
| 327 | ghes = NULL; | ||
| 328 | goto err; | ||
| 329 | } | ||
| 330 | switch (generic->notify.type) { | ||
| 331 | case ACPI_HEST_NOTIFY_POLLED: | ||
| 332 | pr_warning(GHES_PFX | ||
| 333 | "Generic hardware error source: %d notified via POLL is not supported!\n", | ||
| 334 | generic->header.source_id); | ||
| 335 | break; | ||
| 336 | case ACPI_HEST_NOTIFY_EXTERNAL: | ||
| 337 | case ACPI_HEST_NOTIFY_LOCAL: | ||
| 338 | pr_warning(GHES_PFX | ||
| 339 | "Generic hardware error source: %d notified via IRQ is not supported!\n", | ||
| 340 | generic->header.source_id); | ||
| 341 | break; | ||
| 342 | case ACPI_HEST_NOTIFY_SCI: | ||
| 343 | if (list_empty(&ghes_sci)) | ||
| 344 | register_acpi_hed_notifier(&ghes_notifier_sci); | ||
| 345 | list_add_rcu(&ghes->list, &ghes_sci); | ||
| 346 | break; | ||
| 347 | case ACPI_HEST_NOTIFY_NMI: | ||
| 348 | pr_warning(GHES_PFX | ||
| 349 | "Generic hardware error source: %d notified via NMI is not supported!\n", | ||
| 350 | generic->header.source_id); | ||
| 351 | break; | ||
| 352 | default: | ||
| 353 | pr_warning(FW_WARN GHES_PFX | ||
| 354 | "Unknown notification type: %u for generic hardware error source: %d\n", | ||
| 355 | generic->notify.type, generic->header.source_id); | ||
| 356 | break; | ||
| 357 | } | ||
| 358 | |||
| 359 | return 0; | ||
| 360 | err: | ||
| 361 | if (ghes) | ||
| 362 | ghes_fini(ghes); | ||
| 363 | return rc; | ||
| 364 | } | ||
| 365 | |||
| 366 | static void ghes_cleanup(void) | ||
| 367 | { | ||
| 368 | struct ghes *ghes, *nghes; | ||
| 369 | |||
| 370 | if (!list_empty(&ghes_sci)) | ||
| 371 | unregister_acpi_hed_notifier(&ghes_notifier_sci); | ||
| 372 | |||
| 373 | synchronize_rcu(); | ||
| 374 | |||
| 375 | list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) { | ||
| 376 | list_del(&ghes->list); | ||
| 377 | ghes_fini(ghes); | ||
| 378 | kfree(ghes); | ||
| 379 | } | ||
| 380 | } | ||
| 381 | |||
| 382 | static int __init ghes_init(void) | ||
| 383 | { | ||
| 384 | int rc; | ||
| 385 | |||
| 386 | if (acpi_disabled) | ||
| 387 | return -ENODEV; | ||
| 388 | |||
| 389 | if (hest_disable) { | ||
| 390 | pr_info(GHES_PFX "HEST is not enabled!\n"); | ||
| 391 | return -EINVAL; | ||
| 392 | } | ||
| 393 | |||
| 394 | rc = apei_hest_parse(hest_ghes_parse, NULL); | ||
| 395 | if (rc) { | ||
| 396 | pr_err(GHES_PFX | ||
| 397 | "Error during parsing HEST generic hardware error sources.\n"); | ||
| 398 | goto err_cleanup; | ||
| 399 | } | ||
| 400 | |||
| 401 | if (list_empty(&ghes_sci)) { | ||
| 402 | pr_info(GHES_PFX | ||
| 403 | "No functional generic hardware error sources.\n"); | ||
| 404 | rc = -ENODEV; | ||
| 405 | goto err_cleanup; | ||
| 406 | } | ||
| 407 | |||
| 408 | pr_info(GHES_PFX | ||
| 409 | "Generic Hardware Error Source support is initialized.\n"); | ||
| 410 | |||
| 411 | return 0; | ||
| 412 | err_cleanup: | ||
| 413 | ghes_cleanup(); | ||
| 414 | return rc; | ||
| 415 | } | ||
| 416 | |||
| 417 | static void __exit ghes_exit(void) | ||
| 418 | { | ||
| 419 | ghes_cleanup(); | ||
| 420 | } | ||
| 421 | |||
| 422 | module_init(ghes_init); | ||
| 423 | module_exit(ghes_exit); | ||
| 424 | |||
| 425 | MODULE_AUTHOR("Huang Ying"); | ||
| 426 | MODULE_DESCRIPTION("APEI Generic Hardware Error Source support"); | ||
| 427 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c new file mode 100644 index 000000000000..e7f40d362cb3 --- /dev/null +++ b/drivers/acpi/apei/hest.c | |||
| @@ -0,0 +1,173 @@ | |||
| 1 | /* | ||
| 2 | * APEI Hardware Error Source Table support | ||
| 3 | * | ||
| 4 | * HEST describes error sources in detail and communicates operational | ||
| 5 | * parameters (e.g. severity levels, masking bits, and threshold | ||
| 6 | * values) to Linux as necessary. It also allows the BIOS to report | ||
| 7 | * non-standard error sources to Linux (for example, chipset-specific | ||
| 8 | * error registers). | ||
| 9 | * | ||
| 10 | * For more information about HEST, please refer to ACPI Specification | ||
| 11 | * version 4.0, section 17.3.2. | ||
| 12 | * | ||
| 13 | * Copyright 2009 Intel Corp. | ||
| 14 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 15 | * | ||
| 16 | * This program is free software; you can redistribute it and/or | ||
| 17 | * modify it under the terms of the GNU General Public License version | ||
| 18 | * 2 as published by the Free Software Foundation; | ||
| 19 | * | ||
| 20 | * This program is distributed in the hope that it will be useful, | ||
| 21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 23 | * GNU General Public License for more details. | ||
| 24 | * | ||
| 25 | * You should have received a copy of the GNU General Public License | ||
| 26 | * along with this program; if not, write to the Free Software | ||
| 27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 28 | */ | ||
| 29 | |||
| 30 | #include <linux/kernel.h> | ||
| 31 | #include <linux/module.h> | ||
| 32 | #include <linux/init.h> | ||
| 33 | #include <linux/acpi.h> | ||
| 34 | #include <linux/kdebug.h> | ||
| 35 | #include <linux/highmem.h> | ||
| 36 | #include <linux/io.h> | ||
| 37 | #include <acpi/apei.h> | ||
| 38 | |||
| 39 | #include "apei-internal.h" | ||
| 40 | |||
| 41 | #define HEST_PFX "HEST: " | ||
| 42 | |||
| 43 | int hest_disable; | ||
| 44 | EXPORT_SYMBOL_GPL(hest_disable); | ||
| 45 | |||
| 46 | /* HEST table parsing */ | ||
| 47 | |||
| 48 | static struct acpi_table_hest *hest_tab; | ||
| 49 | |||
| 50 | static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data) | ||
| 51 | { | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { | ||
| 56 | [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ | ||
| 57 | [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, | ||
| 58 | [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), | ||
| 59 | [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root), | ||
| 60 | [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer), | ||
| 61 | [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), | ||
| 62 | [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), | ||
| 63 | }; | ||
| 64 | |||
| 65 | static int hest_esrc_len(struct acpi_hest_header *hest_hdr) | ||
| 66 | { | ||
| 67 | u16 hest_type = hest_hdr->type; | ||
| 68 | int len; | ||
| 69 | |||
| 70 | if (hest_type >= ACPI_HEST_TYPE_RESERVED) | ||
| 71 | return 0; | ||
| 72 | |||
| 73 | len = hest_esrc_len_tab[hest_type]; | ||
| 74 | |||
| 75 | if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) { | ||
| 76 | struct acpi_hest_ia_corrected *cmc; | ||
| 77 | cmc = (struct acpi_hest_ia_corrected *)hest_hdr; | ||
| 78 | len = sizeof(*cmc) + cmc->num_hardware_banks * | ||
| 79 | sizeof(struct acpi_hest_ia_error_bank); | ||
| 80 | } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) { | ||
| 81 | struct acpi_hest_ia_machine_check *mc; | ||
| 82 | mc = (struct acpi_hest_ia_machine_check *)hest_hdr; | ||
| 83 | len = sizeof(*mc) + mc->num_hardware_banks * | ||
| 84 | sizeof(struct acpi_hest_ia_error_bank); | ||
| 85 | } | ||
| 86 | BUG_ON(len == -1); | ||
| 87 | |||
| 88 | return len; | ||
| 89 | } | ||
| 90 | |||
| 91 | int apei_hest_parse(apei_hest_func_t func, void *data) | ||
| 92 | { | ||
| 93 | struct acpi_hest_header *hest_hdr; | ||
| 94 | int i, rc, len; | ||
| 95 | |||
| 96 | if (hest_disable) | ||
| 97 | return -EINVAL; | ||
| 98 | |||
| 99 | hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); | ||
| 100 | for (i = 0; i < hest_tab->error_source_count; i++) { | ||
| 101 | len = hest_esrc_len(hest_hdr); | ||
| 102 | if (!len) { | ||
| 103 | pr_warning(FW_WARN HEST_PFX | ||
| 104 | "Unknown or unused hardware error source " | ||
| 105 | "type: %d for hardware error source: %d.\n", | ||
| 106 | hest_hdr->type, hest_hdr->source_id); | ||
| 107 | return -EINVAL; | ||
| 108 | } | ||
| 109 | if ((void *)hest_hdr + len > | ||
| 110 | (void *)hest_tab + hest_tab->header.length) { | ||
| 111 | pr_warning(FW_BUG HEST_PFX | ||
| 112 | "Table contents overflow for hardware error source: %d.\n", | ||
| 113 | hest_hdr->source_id); | ||
| 114 | return -EINVAL; | ||
| 115 | } | ||
| 116 | |||
| 117 | rc = func(hest_hdr, data); | ||
| 118 | if (rc) | ||
| 119 | return rc; | ||
| 120 | |||
| 121 | hest_hdr = (void *)hest_hdr + len; | ||
| 122 | } | ||
| 123 | |||
| 124 | return 0; | ||
| 125 | } | ||
| 126 | EXPORT_SYMBOL_GPL(apei_hest_parse); | ||
| 127 | |||
| 128 | static int __init setup_hest_disable(char *str) | ||
| 129 | { | ||
| 130 | hest_disable = 1; | ||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 134 | __setup("hest_disable", setup_hest_disable); | ||
| 135 | |||
| 136 | static int __init hest_init(void) | ||
| 137 | { | ||
| 138 | acpi_status status; | ||
| 139 | int rc = -ENODEV; | ||
| 140 | |||
| 141 | if (acpi_disabled) | ||
| 142 | goto err; | ||
| 143 | |||
| 144 | if (hest_disable) { | ||
| 145 | pr_info(HEST_PFX "HEST table parsing is disabled.\n"); | ||
| 146 | goto err; | ||
| 147 | } | ||
| 148 | |||
| 149 | status = acpi_get_table(ACPI_SIG_HEST, 0, | ||
| 150 | (struct acpi_table_header **)&hest_tab); | ||
| 151 | if (status == AE_NOT_FOUND) { | ||
| 152 | pr_info(HEST_PFX "Table is not found!\n"); | ||
| 153 | goto err; | ||
| 154 | } else if (ACPI_FAILURE(status)) { | ||
| 155 | const char *msg = acpi_format_exception(status); | ||
| 156 | pr_err(HEST_PFX "Failed to get table, %s\n", msg); | ||
| 157 | rc = -EINVAL; | ||
| 158 | goto err; | ||
| 159 | } | ||
| 160 | |||
| 161 | rc = apei_hest_parse(hest_void_parse, NULL); | ||
| 162 | if (rc) | ||
| 163 | goto err; | ||
| 164 | |||
| 165 | pr_info(HEST_PFX "HEST table parsing is initialized.\n"); | ||
| 166 | |||
| 167 | return 0; | ||
| 168 | err: | ||
| 169 | hest_disable = 1; | ||
| 170 | return rc; | ||
| 171 | } | ||
| 172 | |||
| 173 | subsys_initcall(hest_init); | ||
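Note: the exported apei_hest_parse() above walks every error source descriptor in the HEST and hands each one to a caller-supplied callback. A minimal sketch of a client, assuming <acpi/apei.h> for the prototype; the names ghes_count_one and ghes_count are hypothetical, not part of this patch:

    /* Hypothetical callback: count the Generic Hardware Error Sources. */
    static int ghes_count_one(struct acpi_hest_header *hest_hdr, void *data)
    {
            int *count = data;

            if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
                    (*count)++;
            return 0;       /* returning non-zero would abort the walk */
    }

    static int ghes_count(void)
    {
            int count = 0, rc;

            rc = apei_hest_parse(ghes_count_one, &count);
            return rc ? rc : count;
    }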
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c new file mode 100644 index 000000000000..814b19249616 --- /dev/null +++ b/drivers/acpi/atomicio.c | |||
| @@ -0,0 +1,360 @@ | |||
| 1 | /* | ||
| 2 | * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then | ||
| 3 | * accessing in atomic context. | ||
| 4 | * | ||
| 5 | * This is used by the NMI handler to access IO memory, because | ||
| 6 | * ioremap/iounmap cannot be used in NMI context. The IO memory area | ||
| 7 | * is pre-mapped in process context and then accessed from the NMI handler. | ||
| 8 | * | ||
| 9 | * Copyright (C) 2009-2010, Intel Corp. | ||
| 10 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or | ||
| 13 | * modify it under the terms of the GNU General Public License version | ||
| 14 | * 2 as published by the Free Software Foundation. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | * | ||
| 21 | * You should have received a copy of the GNU General Public License | ||
| 22 | * along with this program; if not, write to the Free Software | ||
| 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 24 | */ | ||
| 25 | |||
| 26 | #include <linux/kernel.h> | ||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/acpi.h> | ||
| 30 | #include <linux/io.h> | ||
| 31 | #include <linux/kref.h> | ||
| 32 | #include <linux/rculist.h> | ||
| 33 | #include <linux/interrupt.h> | ||
| 34 | #include <acpi/atomicio.h> | ||
| 35 | |||
| 36 | #define ACPI_PFX "ACPI: " | ||
| 37 | |||
| 38 | static LIST_HEAD(acpi_iomaps); | ||
| 39 | /* | ||
| 40 | * Used for mutual exclusion between writers of the acpi_iomaps list; | ||
| 41 | * RCU is used for synchronization between readers and writers. | ||
| 42 | */ | ||
| 43 | static DEFINE_SPINLOCK(acpi_iomaps_lock); | ||
| 44 | |||
| 45 | struct acpi_iomap { | ||
| 46 | struct list_head list; | ||
| 47 | void __iomem *vaddr; | ||
| 48 | unsigned long size; | ||
| 49 | phys_addr_t paddr; | ||
| 50 | struct kref ref; | ||
| 51 | }; | ||
| 52 | |||
| 53 | /* acpi_iomaps_lock or RCU read lock must be held before calling */ | ||
| 54 | static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr, | ||
| 55 | unsigned long size) | ||
| 56 | { | ||
| 57 | struct acpi_iomap *map; | ||
| 58 | |||
| 59 | list_for_each_entry_rcu(map, &acpi_iomaps, list) { | ||
| 60 | if (map->paddr + map->size >= paddr + size && | ||
| 61 | map->paddr <= paddr) | ||
| 62 | return map; | ||
| 63 | } | ||
| 64 | return NULL; | ||
| 65 | } | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Atomic "ioremap" used by NMI handler, if the specified IO memory | ||
| 69 | * area is not pre-mapped, NULL will be returned. | ||
| 70 | * | ||
| 71 | * acpi_iomaps_lock or RCU read lock must be held before calling | ||
| 72 | */ | ||
| 73 | static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, | ||
| 74 | unsigned long size) | ||
| 75 | { | ||
| 76 | struct acpi_iomap *map; | ||
| 77 | |||
| 78 | map = __acpi_find_iomap(paddr, size); | ||
| 79 | if (map) | ||
| 80 | return map->vaddr + (paddr - map->paddr); | ||
| 81 | else | ||
| 82 | return NULL; | ||
| 83 | } | ||
| 84 | |||
| 85 | /* acpi_iomaps_lock must be held before calling */ | ||
| 86 | static void __iomem *__acpi_try_ioremap(phys_addr_t paddr, | ||
| 87 | unsigned long size) | ||
| 88 | { | ||
| 89 | struct acpi_iomap *map; | ||
| 90 | |||
| 91 | map = __acpi_find_iomap(paddr, size); | ||
| 92 | if (map) { | ||
| 93 | kref_get(&map->ref); | ||
| 94 | return map->vaddr + (paddr - map->paddr); | ||
| 95 | } else | ||
| 96 | return NULL; | ||
| 97 | } | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Used to pre-map the specified IO memory area. First check whether | ||
| 101 | * the area is already pre-mapped; if it is, increase the reference | ||
| 102 | * count (in __acpi_try_ioremap) and return. Otherwise, do the real | ||
| 103 | * ioremap and add the mapping to the acpi_iomaps list. | ||
| 104 | */ | ||
| 105 | static void __iomem *acpi_pre_map(phys_addr_t paddr, | ||
| 106 | unsigned long size) | ||
| 107 | { | ||
| 108 | void __iomem *vaddr; | ||
| 109 | struct acpi_iomap *map; | ||
| 110 | unsigned long pg_sz, flags; | ||
| 111 | phys_addr_t pg_off; | ||
| 112 | |||
| 113 | spin_lock_irqsave(&acpi_iomaps_lock, flags); | ||
| 114 | vaddr = __acpi_try_ioremap(paddr, size); | ||
| 115 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); | ||
| 116 | if (vaddr) | ||
| 117 | return vaddr; | ||
| 118 | |||
| 119 | pg_off = paddr & PAGE_MASK; | ||
| 120 | pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; | ||
| 121 | vaddr = ioremap(pg_off, pg_sz); | ||
| 122 | if (!vaddr) | ||
| 123 | return NULL; | ||
| 124 | map = kmalloc(sizeof(*map), GFP_KERNEL); | ||
| 125 | if (!map) | ||
| 126 | goto err_unmap; | ||
| 127 | INIT_LIST_HEAD(&map->list); | ||
| 128 | map->paddr = pg_off; | ||
| 129 | map->size = pg_sz; | ||
| 130 | map->vaddr = vaddr; | ||
| 131 | kref_init(&map->ref); | ||
| 132 | |||
| 133 | spin_lock_irqsave(&acpi_iomaps_lock, flags); | ||
| 134 | vaddr = __acpi_try_ioremap(paddr, size); | ||
| 135 | if (vaddr) { | ||
| 136 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); | ||
| 137 | iounmap(map->vaddr); | ||
| 138 | kfree(map); | ||
| 139 | return vaddr; | ||
| 140 | } | ||
| 141 | list_add_tail_rcu(&map->list, &acpi_iomaps); | ||
| 142 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); | ||
| 143 | |||
| 144 | return vaddr + (paddr - pg_off); | ||
| 145 | err_unmap: | ||
| 146 | iounmap(vaddr); | ||
| 147 | return NULL; | ||
| 148 | } | ||
| 149 | |||
| 150 | /* acpi_iomaps_lock must be held before calling */ | ||
| 151 | static void __acpi_kref_del_iomap(struct kref *ref) | ||
| 152 | { | ||
| 153 | struct acpi_iomap *map; | ||
| 154 | |||
| 155 | map = container_of(ref, struct acpi_iomap, ref); | ||
| 156 | list_del_rcu(&map->list); | ||
| 157 | } | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Used to post-unmap the specified IO memory area. The iounmap is | ||
| 161 | * done only if the reference count drops to zero. | ||
| 162 | */ | ||
| 163 | static void acpi_post_unmap(phys_addr_t paddr, unsigned long size) | ||
| 164 | { | ||
| 165 | struct acpi_iomap *map; | ||
| 166 | unsigned long flags; | ||
| 167 | int del; | ||
| 168 | |||
| 169 | spin_lock_irqsave(&acpi_iomaps_lock, flags); | ||
| 170 | map = __acpi_find_iomap(paddr, size); | ||
| 171 | BUG_ON(!map); | ||
| 172 | del = kref_put(&map->ref, __acpi_kref_del_iomap); | ||
| 173 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); | ||
| 174 | |||
| 175 | if (!del) | ||
| 176 | return; | ||
| 177 | |||
| 178 | synchronize_rcu(); | ||
| 179 | iounmap(map->vaddr); | ||
| 180 | kfree(map); | ||
| 181 | } | ||
| 182 | |||
| 183 | /* When called from an NMI handler, silent must be set to 1 */ | ||
| 184 | static int acpi_check_gar(struct acpi_generic_address *reg, | ||
| 185 | u64 *paddr, int silent) | ||
| 186 | { | ||
| 187 | u32 width, space_id; | ||
| 188 | |||
| 189 | width = reg->bit_width; | ||
| 190 | space_id = reg->space_id; | ||
| 191 | /* Handle possible alignment issues */ | ||
| 192 | memcpy(paddr, &reg->address, sizeof(*paddr)); | ||
| 193 | if (!*paddr) { | ||
| 194 | if (!silent) | ||
| 195 | pr_warning(FW_BUG ACPI_PFX | ||
| 196 | "Invalid physical address in GAR [0x%llx/%u/%u]\n", | ||
| 197 | *paddr, width, space_id); | ||
| 198 | return -EINVAL; | ||
| 199 | } | ||
| 200 | |||
| 201 | if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { | ||
| 202 | if (!silent) | ||
| 203 | pr_warning(FW_BUG ACPI_PFX | ||
| 204 | "Invalid bit width in GAR [0x%llx/%u/%u]\n", | ||
| 205 | *paddr, width, space_id); | ||
| 206 | return -EINVAL; | ||
| 207 | } | ||
| 208 | |||
| 209 | if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && | ||
| 210 | space_id != ACPI_ADR_SPACE_SYSTEM_IO) { | ||
| 211 | if (!silent) | ||
| 212 | pr_warning(FW_BUG ACPI_PFX | ||
| 213 | "Invalid address space type in GAR [0x%llx/%u/%u]\n", | ||
| 214 | *paddr, width, space_id); | ||
| 215 | return -EINVAL; | ||
| 216 | } | ||
| 217 | |||
| 218 | return 0; | ||
| 219 | } | ||
| 220 | |||
| 221 | /* Pre-map, working on GAR */ | ||
| 222 | int acpi_pre_map_gar(struct acpi_generic_address *reg) | ||
| 223 | { | ||
| 224 | u64 paddr; | ||
| 225 | void __iomem *vaddr; | ||
| 226 | int rc; | ||
| 227 | |||
| 228 | if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) | ||
| 229 | return 0; | ||
| 230 | |||
| 231 | rc = acpi_check_gar(reg, &paddr, 0); | ||
| 232 | if (rc) | ||
| 233 | return rc; | ||
| 234 | |||
| 235 | vaddr = acpi_pre_map(paddr, reg->bit_width / 8); | ||
| 236 | if (!vaddr) | ||
| 237 | return -EIO; | ||
| 238 | |||
| 239 | return 0; | ||
| 240 | } | ||
| 241 | EXPORT_SYMBOL_GPL(acpi_pre_map_gar); | ||
| 242 | |||
| 243 | /* Post-unmap, working on GAR */ | ||
| 244 | int acpi_post_unmap_gar(struct acpi_generic_address *reg) | ||
| 245 | { | ||
| 246 | u64 paddr; | ||
| 247 | int rc; | ||
| 248 | |||
| 249 | if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) | ||
| 250 | return 0; | ||
| 251 | |||
| 252 | rc = acpi_check_gar(reg, &paddr, 0); | ||
| 253 | if (rc) | ||
| 254 | return rc; | ||
| 255 | |||
| 256 | acpi_post_unmap(paddr, reg->bit_width / 8); | ||
| 257 | |||
| 258 | return 0; | ||
| 259 | } | ||
| 260 | EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); | ||
| 261 | |||
| 262 | /* | ||
| 263 | * Can be used in atomic (including NMI) or process context. The RCU | ||
| 264 | * read lock may only be released after the IO memory access is done. | ||
| 265 | */ | ||
| 266 | static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width) | ||
| 267 | { | ||
| 268 | void __iomem *addr; | ||
| 269 | |||
| 270 | rcu_read_lock(); | ||
| 271 | addr = __acpi_ioremap_fast(paddr, width); | ||
| 272 | switch (width) { | ||
| 273 | case 8: | ||
| 274 | *val = readb(addr); | ||
| 275 | break; | ||
| 276 | case 16: | ||
| 277 | *val = readw(addr); | ||
| 278 | break; | ||
| 279 | case 32: | ||
| 280 | *val = readl(addr); | ||
| 281 | break; | ||
| 282 | case 64: | ||
| 283 | *val = readq(addr); | ||
| 284 | break; | ||
| 285 | default: | ||
| 286 | return -EINVAL; | ||
| 287 | } | ||
| 288 | rcu_read_unlock(); | ||
| 289 | |||
| 290 | return 0; | ||
| 291 | } | ||
| 292 | |||
| 293 | static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) | ||
| 294 | { | ||
| 295 | void __iomem *addr; | ||
| 296 | |||
| 297 | rcu_read_lock(); | ||
| 298 | addr = __acpi_ioremap_fast(paddr, width); | ||
| 299 | switch (width) { | ||
| 300 | case 8: | ||
| 301 | writeb(val, addr); | ||
| 302 | break; | ||
| 303 | case 16: | ||
| 304 | writew(val, addr); | ||
| 305 | break; | ||
| 306 | case 32: | ||
| 307 | writel(val, addr); | ||
| 308 | break; | ||
| 309 | case 64: | ||
| 310 | writeq(val, addr); | ||
| 311 | break; | ||
| 312 | default: | ||
| 313 | return -EINVAL; | ||
| 314 | } | ||
| 315 | rcu_read_unlock(); | ||
| 316 | |||
| 317 | return 0; | ||
| 318 | } | ||
| 319 | |||
| 320 | /* GAR access from atomic (including NMI) or process context */ | ||
| 321 | int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg) | ||
| 322 | { | ||
| 323 | u64 paddr; | ||
| 324 | int rc; | ||
| 325 | |||
| 326 | rc = acpi_check_gar(reg, &paddr, 1); | ||
| 327 | if (rc) | ||
| 328 | return rc; | ||
| 329 | |||
| 330 | *val = 0; | ||
| 331 | switch (reg->space_id) { | ||
| 332 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | ||
| 333 | return acpi_atomic_read_mem(paddr, val, reg->bit_width); | ||
| 334 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
| 335 | return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); | ||
| 336 | default: | ||
| 337 | return -EINVAL; | ||
| 338 | } | ||
| 339 | } | ||
| 340 | EXPORT_SYMBOL_GPL(acpi_atomic_read); | ||
| 341 | |||
| 342 | int acpi_atomic_write(u64 val, struct acpi_generic_address *reg) | ||
| 343 | { | ||
| 344 | u64 paddr; | ||
| 345 | int rc; | ||
| 346 | |||
| 347 | rc = acpi_check_gar(reg, &paddr, 1); | ||
| 348 | if (rc) | ||
| 349 | return rc; | ||
| 350 | |||
| 351 | switch (reg->space_id) { | ||
| 352 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | ||
| 353 | return acpi_atomic_write_mem(paddr, val, reg->bit_width); | ||
| 354 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
| 355 | return acpi_os_write_port(paddr, val, reg->bit_width); | ||
| 356 | default: | ||
| 357 | return -EINVAL; | ||
| 358 | } | ||
| 359 | } | ||
| 360 | EXPORT_SYMBOL_GPL(acpi_atomic_write); | ||
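Note: putting the pieces together, the intended calling pattern is acpi_pre_map_gar() in process context (typically at probe time), acpi_atomic_read()/acpi_atomic_write() from any context including NMI, and acpi_post_unmap_gar() once the register is no longer needed. A sketch only; the helper name and the surrounding error handling are assumptions, not part of this patch:

    /* Hypothetical helper; 'reg' describes an error status register. */
    static int example_read_status(struct acpi_generic_address *reg, u64 *status)
    {
            int rc;

            /* Process context (e.g. probe): pre-map the register once. */
            rc = acpi_pre_map_gar(reg);
            if (rc)
                    return rc;

            /* Safe even in NMI context once the mapping exists. */
            rc = acpi_atomic_read(status, reg);

            /* Process context again: drop the mapping on teardown. */
            acpi_post_unmap_gar(reg);
            return rc;
    }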
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f2234db85da0..e61d4f8e62a5 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void) | |||
| 1027 | /* Don't trust ECDT, which comes from ASUSTek */ | 1027 | /* Don't trust ECDT, which comes from ASUSTek */ |
| 1028 | if (!EC_FLAGS_VALIDATE_ECDT) | 1028 | if (!EC_FLAGS_VALIDATE_ECDT) |
| 1029 | goto install; | 1029 | goto install; |
| 1030 | saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); | 1030 | saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL); |
| 1031 | if (!saved_ec) | 1031 | if (!saved_ec) |
| 1032 | return -ENOMEM; | 1032 | return -ENOMEM; |
| 1033 | memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); | ||
| 1034 | /* fall through */ | 1033 | /* fall through */ |
| 1035 | } | 1034 | } |
| 1036 | 1035 | ||
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c new file mode 100644 index 000000000000..d0c1967f7597 --- /dev/null +++ b/drivers/acpi/hed.c | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | * ACPI Hardware Error Device (PNP0C33) Driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010, Intel Corp. | ||
| 5 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 6 | * | ||
| 7 | * ACPI Hardware Error Device is used to report some hardware errors | ||
| 8 | * notified via SCI, mainly the corrected errors. | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License version | ||
| 12 | * 2 as published by the Free Software Foundation; | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * You should have received a copy of the GNU General Public License | ||
| 20 | * along with this program; if not, write to the Free Software | ||
| 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/module.h> | ||
| 26 | #include <linux/init.h> | ||
| 27 | #include <linux/acpi.h> | ||
| 28 | #include <acpi/acpi_bus.h> | ||
| 29 | #include <acpi/acpi_drivers.h> | ||
| 30 | #include <acpi/hed.h> | ||
| 31 | |||
| 32 | static struct acpi_device_id acpi_hed_ids[] = { | ||
| 33 | {"PNP0C33", 0}, | ||
| 34 | {"", 0}, | ||
| 35 | }; | ||
| 36 | MODULE_DEVICE_TABLE(acpi, acpi_hed_ids); | ||
| 37 | |||
| 38 | static acpi_handle hed_handle; | ||
| 39 | |||
| 40 | static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list); | ||
| 41 | |||
| 42 | int register_acpi_hed_notifier(struct notifier_block *nb) | ||
| 43 | { | ||
| 44 | return blocking_notifier_chain_register(&acpi_hed_notify_list, nb); | ||
| 45 | } | ||
| 46 | EXPORT_SYMBOL_GPL(register_acpi_hed_notifier); | ||
| 47 | |||
| 48 | void unregister_acpi_hed_notifier(struct notifier_block *nb) | ||
| 49 | { | ||
| 50 | blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb); | ||
| 51 | } | ||
| 52 | EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier); | ||
| 53 | |||
| 54 | /* | ||
| 55 | * The SCI that reports a hardware error is forwarded to the | ||
| 56 | * listeners of HED; it is used by HEST Generic Hardware Error | ||
| 57 | * Sources with notification type SCI. | ||
| 58 | */ | ||
| 59 | static void acpi_hed_notify(struct acpi_device *device, u32 event) | ||
| 60 | { | ||
| 61 | blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL); | ||
| 62 | } | ||
| 63 | |||
| 64 | static int __devinit acpi_hed_add(struct acpi_device *device) | ||
| 65 | { | ||
| 66 | /* Only one hardware error device */ | ||
| 67 | if (hed_handle) | ||
| 68 | return -EINVAL; | ||
| 69 | hed_handle = device->handle; | ||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | static int __devexit acpi_hed_remove(struct acpi_device *device, int type) | ||
| 74 | { | ||
| 75 | hed_handle = NULL; | ||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | |||
| 79 | static struct acpi_driver acpi_hed_driver = { | ||
| 80 | .name = "hardware_error_device", | ||
| 81 | .class = "hardware_error", | ||
| 82 | .ids = acpi_hed_ids, | ||
| 83 | .ops = { | ||
| 84 | .add = acpi_hed_add, | ||
| 85 | .remove = acpi_hed_remove, | ||
| 86 | .notify = acpi_hed_notify, | ||
| 87 | }, | ||
| 88 | }; | ||
| 89 | |||
| 90 | static int __init acpi_hed_init(void) | ||
| 91 | { | ||
| 92 | if (acpi_disabled) | ||
| 93 | return -ENODEV; | ||
| 94 | |||
| 95 | if (acpi_bus_register_driver(&acpi_hed_driver) < 0) | ||
| 96 | return -ENODEV; | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static void __exit acpi_hed_exit(void) | ||
| 102 | { | ||
| 103 | acpi_bus_unregister_driver(&acpi_hed_driver); | ||
| 104 | } | ||
| 105 | |||
| 106 | module_init(acpi_hed_init); | ||
| 107 | module_exit(acpi_hed_exit); | ||
| 108 | |||
| 109 | ACPI_MODULE_NAME("hed"); | ||
| 110 | MODULE_AUTHOR("Huang Ying"); | ||
| 111 | MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); | ||
| 112 | MODULE_LICENSE("GPL"); | ||
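Note: the notifier pair exported above is how a consumer (for instance, a handler for a HEST Generic Hardware Error Source with SCI notification) gets poked when the Hardware Error Device raises its SCI. A minimal sketch with hypothetical names:

    static int example_hed_event(struct notifier_block *nb,
                                 unsigned long event, void *data)
    {
            /* Poll the firmware-reported error sources here. */
            return NOTIFY_OK;
    }

    static struct notifier_block example_hed_nb = {
            .notifier_call = example_hed_event,
    };

    /* register_acpi_hed_notifier(&example_hed_nb) at init time,
     * unregister_acpi_hed_notifier(&example_hed_nb) on exit. */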
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c deleted file mode 100644 index 1c527a192872..000000000000 --- a/drivers/acpi/hest.c +++ /dev/null | |||
| @@ -1,139 +0,0 @@ | |||
| 1 | #include <linux/acpi.h> | ||
| 2 | #include <linux/pci.h> | ||
| 3 | |||
| 4 | #define PREFIX "ACPI: " | ||
| 5 | |||
| 6 | static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p) | ||
| 7 | { | ||
| 8 | return sizeof(*p) + | ||
| 9 | (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); | ||
| 10 | } | ||
| 11 | |||
| 12 | static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p) | ||
| 13 | { | ||
| 14 | return sizeof(*p) + | ||
| 15 | (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); | ||
| 16 | } | ||
| 17 | |||
| 18 | static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p) | ||
| 19 | { | ||
| 20 | return sizeof(*p); | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p) | ||
| 24 | { | ||
| 25 | return sizeof(*p); | ||
| 26 | } | ||
| 27 | |||
| 28 | static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci) | ||
| 29 | { | ||
| 30 | return (0 == pci_domain_nr(pci->bus) && | ||
| 31 | p->bus == pci->bus->number && | ||
| 32 | p->device == PCI_SLOT(pci->devfn) && | ||
| 33 | p->function == PCI_FUNC(pci->devfn)); | ||
| 34 | } | ||
| 35 | |||
| 36 | static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first) | ||
| 37 | { | ||
| 38 | struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header); | ||
| 39 | unsigned long rc=0; | ||
| 40 | u8 pcie_type = 0; | ||
| 41 | u8 bridge = 0; | ||
| 42 | switch (type) { | ||
| 43 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
| 44 | rc = sizeof(struct acpi_hest_aer_root); | ||
| 45 | pcie_type = PCI_EXP_TYPE_ROOT_PORT; | ||
| 46 | break; | ||
| 47 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
| 48 | rc = sizeof(struct acpi_hest_aer); | ||
| 49 | pcie_type = PCI_EXP_TYPE_ENDPOINT; | ||
| 50 | break; | ||
| 51 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
| 52 | rc = sizeof(struct acpi_hest_aer_bridge); | ||
| 53 | if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE) | ||
| 54 | bridge = 1; | ||
| 55 | break; | ||
| 56 | } | ||
| 57 | |||
| 58 | if (p->flags & ACPI_HEST_GLOBAL) { | ||
| 59 | if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge) | ||
| 60 | *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
| 61 | } | ||
| 62 | else | ||
| 63 | if (hest_match_pci(p, pci)) | ||
| 64 | *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
| 65 | return rc; | ||
| 66 | } | ||
| 67 | |||
| 68 | static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci) | ||
| 69 | { | ||
| 70 | struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader; | ||
| 71 | void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */ | ||
| 72 | struct acpi_hest_header *hdr = p; | ||
| 73 | |||
| 74 | int i; | ||
| 75 | int firmware_first = 0; | ||
| 76 | static unsigned char printed_unused = 0; | ||
| 77 | static unsigned char printed_reserved = 0; | ||
| 78 | |||
| 79 | for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) { | ||
| 80 | switch (hdr->type) { | ||
| 81 | case ACPI_HEST_TYPE_IA32_CHECK: | ||
| 82 | p += parse_acpi_hest_ia_machine_check(p); | ||
| 83 | break; | ||
| 84 | case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK: | ||
| 85 | p += parse_acpi_hest_ia_corrected(p); | ||
| 86 | break; | ||
| 87 | case ACPI_HEST_TYPE_IA32_NMI: | ||
| 88 | p += parse_acpi_hest_ia_nmi(p); | ||
| 89 | break; | ||
| 90 | /* These three should never appear */ | ||
| 91 | case ACPI_HEST_TYPE_NOT_USED3: | ||
| 92 | case ACPI_HEST_TYPE_NOT_USED4: | ||
| 93 | case ACPI_HEST_TYPE_NOT_USED5: | ||
| 94 | if (!printed_unused) { | ||
| 95 | printk(KERN_DEBUG PREFIX | ||
| 96 | "HEST Error Source list contains an obsolete type (%d).\n", hdr->type); | ||
| 97 | printed_unused = 1; | ||
| 98 | } | ||
| 99 | break; | ||
| 100 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
| 101 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
| 102 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
| 103 | p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first); | ||
| 104 | break; | ||
| 105 | case ACPI_HEST_TYPE_GENERIC_ERROR: | ||
| 106 | p += parse_acpi_hest_generic(p); | ||
| 107 | break; | ||
| 108 | /* These should never appear either */ | ||
| 109 | case ACPI_HEST_TYPE_RESERVED: | ||
| 110 | default: | ||
| 111 | if (!printed_reserved) { | ||
| 112 | printk(KERN_DEBUG PREFIX | ||
| 113 | "HEST Error Source list contains a reserved type (%d).\n", hdr->type); | ||
| 114 | printed_reserved = 1; | ||
| 115 | } | ||
| 116 | break; | ||
| 117 | } | ||
| 118 | } | ||
| 119 | return firmware_first; | ||
| 120 | } | ||
| 121 | |||
| 122 | int acpi_hest_firmware_first_pci(struct pci_dev *pci) | ||
| 123 | { | ||
| 124 | acpi_status status = AE_NOT_FOUND; | ||
| 125 | struct acpi_table_header *hest = NULL; | ||
| 126 | |||
| 127 | if (acpi_disabled) | ||
| 128 | return 0; | ||
| 129 | |||
| 130 | status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); | ||
| 131 | |||
| 132 | if (ACPI_SUCCESS(status)) { | ||
| 133 | if (acpi_hest_firmware_first(hest, pci)) { | ||
| 134 | return 1; | ||
| 135 | } | ||
| 136 | } | ||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci); | ||
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index aefce33f2a09..4eac59393edc 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
| @@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus) | |||
| 120 | struct acpi_pci_root *root; | 120 | struct acpi_pci_root *root; |
| 121 | 121 | ||
| 122 | list_for_each_entry(root, &acpi_pci_roots, node) | 122 | list_for_each_entry(root, &acpi_pci_roots, node) |
| 123 | if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) | 123 | if ((root->segment == (u16) seg) && |
| 124 | (root->secondary.start == (u16) bus)) | ||
| 124 | return root->device->handle; | 125 | return root->device->handle; |
| 125 | return NULL; | 126 | return NULL; |
| 126 | } | 127 | } |
| @@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge); | |||
| 154 | static acpi_status | 155 | static acpi_status |
| 155 | get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) | 156 | get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) |
| 156 | { | 157 | { |
| 157 | int *busnr = data; | 158 | struct resource *res = data; |
| 158 | struct acpi_resource_address64 address; | 159 | struct acpi_resource_address64 address; |
| 159 | 160 | ||
| 160 | if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && | 161 | if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && |
| @@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) | |||
| 164 | 165 | ||
| 165 | acpi_resource_to_address64(resource, &address); | 166 | acpi_resource_to_address64(resource, &address); |
| 166 | if ((address.address_length > 0) && | 167 | if ((address.address_length > 0) && |
| 167 | (address.resource_type == ACPI_BUS_NUMBER_RANGE)) | 168 | (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { |
| 168 | *busnr = address.minimum; | 169 | res->start = address.minimum; |
| 170 | res->end = address.minimum + address.address_length - 1; | ||
| 171 | } | ||
| 169 | 172 | ||
| 170 | return AE_OK; | 173 | return AE_OK; |
| 171 | } | 174 | } |
| 172 | 175 | ||
| 173 | static acpi_status try_get_root_bridge_busnr(acpi_handle handle, | 176 | static acpi_status try_get_root_bridge_busnr(acpi_handle handle, |
| 174 | unsigned long long *bus) | 177 | struct resource *res) |
| 175 | { | 178 | { |
| 176 | acpi_status status; | 179 | acpi_status status; |
| 177 | int busnum; | ||
| 178 | 180 | ||
| 179 | busnum = -1; | 181 | res->start = -1; |
| 180 | status = | 182 | status = |
| 181 | acpi_walk_resources(handle, METHOD_NAME__CRS, | 183 | acpi_walk_resources(handle, METHOD_NAME__CRS, |
| 182 | get_root_bridge_busnr_callback, &busnum); | 184 | get_root_bridge_busnr_callback, res); |
| 183 | if (ACPI_FAILURE(status)) | 185 | if (ACPI_FAILURE(status)) |
| 184 | return status; | 186 | return status; |
| 185 | /* Check if we really get a bus number from _CRS */ | 187 | if (res->start == -1) |
| 186 | if (busnum == -1) | ||
| 187 | return AE_ERROR; | 188 | return AE_ERROR; |
| 188 | *bus = busnum; | ||
| 189 | return AE_OK; | 189 | return AE_OK; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| @@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
| 429 | struct acpi_device *child; | 429 | struct acpi_device *child; |
| 430 | u32 flags, base_flags; | 430 | u32 flags, base_flags; |
| 431 | 431 | ||
| 432 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | ||
| 433 | if (!root) | ||
| 434 | return -ENOMEM; | ||
| 435 | |||
| 432 | segment = 0; | 436 | segment = 0; |
| 433 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, | 437 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, |
| 434 | &segment); | 438 | &segment); |
| 435 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 439 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
| 436 | printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); | 440 | printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); |
| 437 | return -ENODEV; | 441 | result = -ENODEV; |
| 442 | goto end; | ||
| 438 | } | 443 | } |
| 439 | 444 | ||
| 440 | /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ | 445 | /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ |
| 441 | bus = 0; | 446 | root->secondary.flags = IORESOURCE_BUS; |
| 442 | status = try_get_root_bridge_busnr(device->handle, &bus); | 447 | status = try_get_root_bridge_busnr(device->handle, &root->secondary); |
| 443 | if (ACPI_FAILURE(status)) { | 448 | if (ACPI_FAILURE(status)) { |
| 449 | /* | ||
| 450 | * We need both the start and end of the downstream bus range | ||
| 451 | * to interpret _CBA (MMCONFIG base address), so it really is | ||
| 452 | * supposed to be in _CRS. If we don't find it there, all we | ||
| 453 | * can do is assume [_BBN-0xFF] or [0-0xFF]. | ||
| 454 | */ | ||
| 455 | root->secondary.end = 0xFF; | ||
| 456 | printk(KERN_WARNING FW_BUG PREFIX | ||
| 457 | "no secondary bus range in _CRS\n"); | ||
| 444 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); | 458 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); |
| 445 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 459 | if (ACPI_SUCCESS(status)) |
| 446 | printk(KERN_ERR PREFIX | 460 | root->secondary.start = bus; |
| 447 | "no bus number in _CRS and can't evaluate _BBN\n"); | 461 | else if (status == AE_NOT_FOUND) |
| 448 | return -ENODEV; | 462 | root->secondary.start = 0; |
| 463 | else { | ||
| 464 | printk(KERN_ERR PREFIX "can't evaluate _BBN\n"); | ||
| 465 | result = -ENODEV; | ||
| 466 | goto end; | ||
| 449 | } | 467 | } |
| 450 | } | 468 | } |
| 451 | 469 | ||
| 452 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | ||
| 453 | if (!root) | ||
| 454 | return -ENOMEM; | ||
| 455 | |||
| 456 | INIT_LIST_HEAD(&root->node); | 470 | INIT_LIST_HEAD(&root->node); |
| 457 | root->device = device; | 471 | root->device = device; |
| 458 | root->segment = segment & 0xFFFF; | 472 | root->segment = segment & 0xFFFF; |
| 459 | root->bus_nr = bus & 0xFF; | ||
| 460 | strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); | 473 | strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); |
| 461 | strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); | 474 | strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); |
| 462 | device->driver_data = root; | 475 | device->driver_data = root; |
| @@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
| 475 | /* TBD: Locking */ | 488 | /* TBD: Locking */ |
| 476 | list_add_tail(&root->node, &acpi_pci_roots); | 489 | list_add_tail(&root->node, &acpi_pci_roots); |
| 477 | 490 | ||
| 478 | printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", | 491 | printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n", |
| 479 | acpi_device_name(device), acpi_device_bid(device), | 492 | acpi_device_name(device), acpi_device_bid(device), |
| 480 | root->segment, root->bus_nr); | 493 | root->segment, &root->secondary); |
| 481 | 494 | ||
| 482 | /* | 495 | /* |
| 483 | * Scan the Root Bridge | 496 | * Scan the Root Bridge |
| @@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
| 486 | * PCI namespace does not get created until this call is made (and | 499 | * PCI namespace does not get created until this call is made (and |
| 487 | * thus the root bridge's pci_dev does not exist). | 500 | * thus the root bridge's pci_dev does not exist). |
| 488 | */ | 501 | */ |
| 489 | root->bus = pci_acpi_scan_root(device, segment, bus); | 502 | root->bus = pci_acpi_scan_root(root); |
| 490 | if (!root->bus) { | 503 | if (!root->bus) { |
| 491 | printk(KERN_ERR PREFIX | 504 | printk(KERN_ERR PREFIX |
| 492 | "Bus %04x:%02x not present in PCI namespace\n", | 505 | "Bus %04x:%02x not present in PCI namespace\n", |
| 493 | root->segment, root->bus_nr); | 506 | root->segment, (unsigned int)root->secondary.start); |
| 494 | result = -ENODEV; | 507 | result = -ENODEV; |
| 495 | goto end; | 508 | goto end; |
| 496 | } | 509 | } |
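Note: with root->bus_nr replaced by the root->secondary resource, an architecture's pci_acpi_scan_root() now receives the full downstream bus range instead of a single starting bus number. A rough illustration of how such a hook might consume the new fields (a sketch only, not the actual x86 implementation):

    struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
    {
            int domain = root->segment;
            int busnum = root->secondary.start;

            /*
             * ... allocate arch sysdata and scan the bus for (domain, busnum);
             * root->secondary.end bounds the bus numbers usable below this root ...
             */
            return NULL;    /* placeholder in this sketch */
    }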
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 5675d9747e87..b1034a9ada4e 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
| @@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
| 616 | acpi_processor_get_limit_info(pr); | 616 | acpi_processor_get_limit_info(pr); |
| 617 | 617 | ||
| 618 | 618 | ||
| 619 | acpi_processor_power_init(pr, device); | 619 | if (cpuidle_get_driver() == &acpi_idle_driver) |
| 620 | acpi_processor_power_init(pr, device); | ||
| 620 | 621 | ||
| 621 | pr->cdev = thermal_cooling_device_register("Processor", device, | 622 | pr->cdev = thermal_cooling_device_register("Processor", device, |
| 622 | &processor_cooling_ops); | 623 | &processor_cooling_ops); |
| @@ -920,9 +921,14 @@ static int __init acpi_processor_init(void) | |||
| 920 | if (!acpi_processor_dir) | 921 | if (!acpi_processor_dir) |
| 921 | return -ENOMEM; | 922 | return -ENOMEM; |
| 922 | #endif | 923 | #endif |
| 923 | result = cpuidle_register_driver(&acpi_idle_driver); | 924 | |
| 924 | if (result < 0) | 925 | if (!cpuidle_register_driver(&acpi_idle_driver)) { |
| 925 | goto out_proc; | 926 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", |
| 927 | acpi_idle_driver.name); | ||
| 928 | } else { | ||
| 929 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", | ||
| 930 | cpuidle_get_driver()->name); | ||
| 931 | } | ||
| 926 | 932 | ||
| 927 | result = acpi_bus_register_driver(&acpi_processor_driver); | 933 | result = acpi_bus_register_driver(&acpi_processor_driver); |
| 928 | if (result < 0) | 934 | if (result < 0) |
| @@ -941,7 +947,6 @@ static int __init acpi_processor_init(void) | |||
| 941 | out_cpuidle: | 947 | out_cpuidle: |
| 942 | cpuidle_unregister_driver(&acpi_idle_driver); | 948 | cpuidle_unregister_driver(&acpi_idle_driver); |
| 943 | 949 | ||
| 944 | out_proc: | ||
| 945 | #ifdef CONFIG_ACPI_PROCFS | 950 | #ifdef CONFIG_ACPI_PROCFS |
| 946 | remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); | 951 | remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); |
| 947 | #endif | 952 | #endif |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c3817e1f32c7..2e8c27d48f2b 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) | |||
| 727 | break; | 727 | break; |
| 728 | } | 728 | } |
| 729 | 729 | ||
| 730 | if (pr->power.states[i].promotion.state) | 730 | seq_puts(seq, "promotion[--] "); |
| 731 | seq_printf(seq, "promotion[C%zd] ", | 731 | |
| 732 | (pr->power.states[i].promotion.state - | 732 | seq_puts(seq, "demotion[--] "); |
| 733 | pr->power.states)); | ||
| 734 | else | ||
| 735 | seq_puts(seq, "promotion[--] "); | ||
| 736 | |||
| 737 | if (pr->power.states[i].demotion.state) | ||
| 738 | seq_printf(seq, "demotion[C%zd] ", | ||
| 739 | (pr->power.states[i].demotion.state - | ||
| 740 | pr->power.states)); | ||
| 741 | else | ||
| 742 | seq_puts(seq, "demotion[--] "); | ||
| 743 | 733 | ||
| 744 | seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", | 734 | seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", |
| 745 | pr->power.states[i].latency, | 735 | pr->power.states[i].latency, |
| @@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 869 | struct acpi_processor *pr; | 859 | struct acpi_processor *pr; |
| 870 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 860 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); |
| 871 | ktime_t kt1, kt2; | 861 | ktime_t kt1, kt2; |
| 862 | s64 idle_time_ns; | ||
| 872 | s64 idle_time; | 863 | s64 idle_time; |
| 873 | s64 sleep_ticks = 0; | 864 | s64 sleep_ticks = 0; |
| 874 | 865 | ||
| @@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 881 | return(acpi_idle_enter_c1(dev, state)); | 872 | return(acpi_idle_enter_c1(dev, state)); |
| 882 | 873 | ||
| 883 | local_irq_disable(); | 874 | local_irq_disable(); |
| 875 | |||
| 884 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 876 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
| 885 | current_thread_info()->status &= ~TS_POLLING; | 877 | current_thread_info()->status &= ~TS_POLLING; |
| 886 | /* | 878 | /* |
| @@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 888 | * NEED_RESCHED: | 880 | * NEED_RESCHED: |
| 889 | */ | 881 | */ |
| 890 | smp_mb(); | 882 | smp_mb(); |
| 891 | } | ||
| 892 | 883 | ||
| 893 | if (unlikely(need_resched())) { | 884 | if (unlikely(need_resched())) { |
| 894 | current_thread_info()->status |= TS_POLLING; | 885 | current_thread_info()->status |= TS_POLLING; |
| 895 | local_irq_enable(); | 886 | local_irq_enable(); |
| 896 | return 0; | 887 | return 0; |
| 888 | } | ||
| 897 | } | 889 | } |
| 898 | 890 | ||
| 899 | /* | 891 | /* |
| @@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 910 | sched_clock_idle_sleep_event(); | 902 | sched_clock_idle_sleep_event(); |
| 911 | acpi_idle_do_entry(cx); | 903 | acpi_idle_do_entry(cx); |
| 912 | kt2 = ktime_get_real(); | 904 | kt2 = ktime_get_real(); |
| 913 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | 905 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); |
| 906 | idle_time = idle_time_ns; | ||
| 907 | do_div(idle_time, NSEC_PER_USEC); | ||
| 914 | 908 | ||
| 915 | sleep_ticks = us_to_pm_timer_ticks(idle_time); | 909 | sleep_ticks = us_to_pm_timer_ticks(idle_time); |
| 916 | 910 | ||
| 917 | /* Tell the scheduler how much we idled: */ | 911 | /* Tell the scheduler how much we idled: */ |
| 918 | sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); | 912 | sched_clock_idle_wakeup_event(idle_time_ns); |
| 919 | 913 | ||
| 920 | local_irq_enable(); | 914 | local_irq_enable(); |
| 921 | current_thread_info()->status |= TS_POLLING; | 915 | if (cx->entry_method != ACPI_CSTATE_FFH) |
| 916 | current_thread_info()->status |= TS_POLLING; | ||
| 922 | 917 | ||
| 923 | cx->usage++; | 918 | cx->usage++; |
| 924 | 919 | ||
| @@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 943 | struct acpi_processor *pr; | 938 | struct acpi_processor *pr; |
| 944 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 939 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); |
| 945 | ktime_t kt1, kt2; | 940 | ktime_t kt1, kt2; |
| 941 | s64 idle_time_ns; | ||
| 946 | s64 idle_time; | 942 | s64 idle_time; |
| 947 | s64 sleep_ticks = 0; | 943 | s64 sleep_ticks = 0; |
| 948 | 944 | ||
| @@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 968 | } | 964 | } |
| 969 | 965 | ||
| 970 | local_irq_disable(); | 966 | local_irq_disable(); |
| 967 | |||
| 971 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 968 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
| 972 | current_thread_info()->status &= ~TS_POLLING; | 969 | current_thread_info()->status &= ~TS_POLLING; |
| 973 | /* | 970 | /* |
| @@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 975 | * NEED_RESCHED: | 972 | * NEED_RESCHED: |
| 976 | */ | 973 | */ |
| 977 | smp_mb(); | 974 | smp_mb(); |
| 978 | } | ||
| 979 | 975 | ||
| 980 | if (unlikely(need_resched())) { | 976 | if (unlikely(need_resched())) { |
| 981 | current_thread_info()->status |= TS_POLLING; | 977 | current_thread_info()->status |= TS_POLLING; |
| 982 | local_irq_enable(); | 978 | local_irq_enable(); |
| 983 | return 0; | 979 | return 0; |
| 980 | } | ||
| 984 | } | 981 | } |
| 985 | 982 | ||
| 986 | acpi_unlazy_tlb(smp_processor_id()); | 983 | acpi_unlazy_tlb(smp_processor_id()); |
| @@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 1025 | spin_unlock(&c3_lock); | 1022 | spin_unlock(&c3_lock); |
| 1026 | } | 1023 | } |
| 1027 | kt2 = ktime_get_real(); | 1024 | kt2 = ktime_get_real(); |
| 1028 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | 1025 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); |
| 1026 | idle_time = idle_time_ns; | ||
| 1027 | do_div(idle_time, NSEC_PER_USEC); | ||
| 1029 | 1028 | ||
| 1030 | sleep_ticks = us_to_pm_timer_ticks(idle_time); | 1029 | sleep_ticks = us_to_pm_timer_ticks(idle_time); |
| 1031 | /* Tell the scheduler how much we idled: */ | 1030 | /* Tell the scheduler how much we idled: */ |
| 1032 | sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); | 1031 | sched_clock_idle_wakeup_event(idle_time_ns); |
| 1033 | 1032 | ||
| 1034 | local_irq_enable(); | 1033 | local_irq_enable(); |
| 1035 | current_thread_info()->status |= TS_POLLING; | 1034 | if (cx->entry_method != ACPI_CSTATE_FFH) |
| 1035 | current_thread_info()->status |= TS_POLLING; | ||
| 1036 | 1036 | ||
| 1037 | cx->usage++; | 1037 | cx->usage++; |
| 1038 | 1038 | ||
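Note: the idle-time bookkeeping above now keeps two forms of the measured interval: the raw nanosecond delta for sched_clock_idle_wakeup_event(), and a microsecond value (via do_div()) for the PM-timer tick conversion. Pulled out of context, the pattern is:

    s64 idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
    s64 idle_time = idle_time_ns;

    do_div(idle_time, NSEC_PER_USEC);              /* now in microseconds */
    sleep_ticks = us_to_pm_timer_ticks(idle_time);
    sched_clock_idle_wakeup_event(idle_time_ns);   /* the scheduler wants ns */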
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index baa76bbf244a..4ab2275b4461 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
| @@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
| 80 | 80 | ||
| 81 | #ifdef CONFIG_ACPI_SLEEP | 81 | #ifdef CONFIG_ACPI_SLEEP |
| 82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
| 83 | /* | ||
| 84 | * According to the ACPI specification the BIOS should make sure that ACPI is | ||
| 85 | * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, | ||
| 86 | * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI | ||
| 87 | * on such systems during resume. Unfortunately that doesn't help in | ||
| 88 | * particularly pathological cases in which SCI_EN has to be set directly on | ||
| 89 | * resume, although the specification states very clearly that this flag is | ||
| 90 | * owned by the hardware. The set_sci_en_on_resume variable will be set in such | ||
| 91 | * cases. | ||
| 92 | */ | ||
| 93 | static bool set_sci_en_on_resume; | ||
| 94 | |||
| 95 | void __init acpi_set_sci_en_on_resume(void) | ||
| 96 | { | ||
| 97 | set_sci_en_on_resume = true; | ||
| 98 | } | ||
| 99 | 83 | ||
| 100 | /* | 84 | /* |
| 101 | * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the | 85 | * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the |
| @@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
| 253 | break; | 237 | break; |
| 254 | } | 238 | } |
| 255 | 239 | ||
| 256 | /* If ACPI is not enabled by the BIOS, we need to enable it here. */ | 240 | /* This violates the spec but is required for bug compatibility. */ |
| 257 | if (set_sci_en_on_resume) | 241 | acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); |
| 258 | acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); | ||
| 259 | else | ||
| 260 | acpi_enable(); | ||
| 261 | 242 | ||
| 262 | /* Reprogram control registers and execute _BFS */ | 243 | /* Reprogram control registers and execute _BFS */ |
| 263 | acpi_leave_sleep_state_prep(acpi_state); | 244 | acpi_leave_sleep_state_prep(acpi_state); |
| @@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) | |||
| 346 | return 0; | 327 | return 0; |
| 347 | } | 328 | } |
| 348 | 329 | ||
| 349 | static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d) | ||
| 350 | { | ||
| 351 | set_sci_en_on_resume = true; | ||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | 330 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { |
| 356 | { | 331 | { |
| 357 | .callback = init_old_suspend_ordering, | 332 | .callback = init_old_suspend_ordering, |
| @@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
| 370 | }, | 345 | }, |
| 371 | }, | 346 | }, |
| 372 | { | 347 | { |
| 373 | .callback = init_set_sci_en_on_resume, | ||
| 374 | .ident = "Apple MacBook 1,1", | ||
| 375 | .matches = { | ||
| 376 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), | ||
| 377 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), | ||
| 378 | }, | ||
| 379 | }, | ||
| 380 | { | ||
| 381 | .callback = init_set_sci_en_on_resume, | ||
| 382 | .ident = "Apple MacMini 1,1", | ||
| 383 | .matches = { | ||
| 384 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), | ||
| 385 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), | ||
| 386 | }, | ||
| 387 | }, | ||
| 388 | { | ||
| 389 | .callback = init_old_suspend_ordering, | 348 | .callback = init_old_suspend_ordering, |
| 390 | .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", | 349 | .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", |
| 391 | .matches = { | 350 | .matches = { |
| @@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
| 394 | }, | 353 | }, |
| 395 | }, | 354 | }, |
| 396 | { | 355 | { |
| 397 | .callback = init_set_sci_en_on_resume, | ||
| 398 | .ident = "Toshiba Satellite L300", | ||
| 399 | .matches = { | ||
| 400 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
| 401 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"), | ||
| 402 | }, | ||
| 403 | }, | ||
| 404 | { | ||
| 405 | .callback = init_set_sci_en_on_resume, | ||
| 406 | .ident = "Hewlett-Packard HP G7000 Notebook PC", | ||
| 407 | .matches = { | ||
| 408 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
| 409 | DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"), | ||
| 410 | }, | ||
| 411 | }, | ||
| 412 | { | ||
| 413 | .callback = init_set_sci_en_on_resume, | ||
| 414 | .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC", | ||
| 415 | .matches = { | ||
| 416 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
| 417 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"), | ||
| 418 | }, | ||
| 419 | }, | ||
| 420 | { | ||
| 421 | .callback = init_set_sci_en_on_resume, | ||
| 422 | .ident = "Hewlett-Packard Pavilion dv4", | ||
| 423 | .matches = { | ||
| 424 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
| 425 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"), | ||
| 426 | }, | ||
| 427 | }, | ||
| 428 | { | ||
| 429 | .callback = init_set_sci_en_on_resume, | ||
| 430 | .ident = "Hewlett-Packard Pavilion dv7", | ||
| 431 | .matches = { | ||
| 432 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
| 433 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"), | ||
| 434 | }, | ||
| 435 | }, | ||
| 436 | { | ||
| 437 | .callback = init_set_sci_en_on_resume, | ||
| 438 | .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC", | ||
| 439 | .matches = { | ||
| 440 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
| 441 | DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"), | ||
| 442 | }, | ||
| 443 | }, | ||
| 444 | { | ||
| 445 | .callback = init_set_sci_en_on_resume, | ||
| 446 | .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC", | ||
| 447 | .matches = { | ||
| 448 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
| 449 | DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"), | ||
| 450 | }, | ||
| 451 | }, | ||
| 452 | { | ||
| 453 | .callback = init_set_sci_en_on_resume, | ||
| 454 | .ident = "Lenovo ThinkPad T410", | ||
| 455 | .matches = { | ||
| 456 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 457 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), | ||
| 458 | }, | ||
| 459 | }, | ||
| 460 | { | ||
| 461 | .callback = init_set_sci_en_on_resume, | ||
| 462 | .ident = "Lenovo ThinkPad T510", | ||
| 463 | .matches = { | ||
| 464 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 465 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), | ||
| 466 | }, | ||
| 467 | }, | ||
| 468 | { | ||
| 469 | .callback = init_set_sci_en_on_resume, | ||
| 470 | .ident = "Lenovo ThinkPad W510", | ||
| 471 | .matches = { | ||
| 472 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 473 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), | ||
| 474 | }, | ||
| 475 | }, | ||
| 476 | { | ||
| 477 | .callback = init_set_sci_en_on_resume, | ||
| 478 | .ident = "Lenovo ThinkPad X201[s]", | ||
| 479 | .matches = { | ||
| 480 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 481 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), | ||
| 482 | }, | ||
| 483 | }, | ||
| 484 | { | ||
| 485 | .callback = init_old_suspend_ordering, | 356 | .callback = init_old_suspend_ordering, |
| 486 | .ident = "Panasonic CF51-2L", | 357 | .ident = "Panasonic CF51-2L", |
| 487 | .matches = { | 358 | .matches = { |
| @@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
| 490 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), | 361 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), |
| 491 | }, | 362 | }, |
| 492 | }, | 363 | }, |
| 493 | { | ||
| 494 | .callback = init_set_sci_en_on_resume, | ||
| 495 | .ident = "Dell Studio 1558", | ||
| 496 | .matches = { | ||
| 497 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 498 | DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"), | ||
| 499 | }, | ||
| 500 | }, | ||
| 501 | { | ||
| 502 | .callback = init_set_sci_en_on_resume, | ||
| 503 | .ident = "Dell Studio 1557", | ||
| 504 | .matches = { | ||
| 505 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 506 | DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"), | ||
| 507 | }, | ||
| 508 | }, | ||
| 509 | { | ||
| 510 | .callback = init_set_sci_en_on_resume, | ||
| 511 | .ident = "Dell Studio 1555", | ||
| 512 | .matches = { | ||
| 513 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 514 | DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"), | ||
| 515 | }, | ||
| 516 | }, | ||
| 517 | {}, | 364 | {}, |
| 518 | }; | 365 | }; |
| 519 | #endif /* CONFIG_SUSPEND */ | 366 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 8a8f3b3382a6..25b8bd149284 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | 1 | ||
| 2 | extern u8 sleep_states[]; | 2 | extern u8 sleep_states[]; |
| 3 | extern int acpi_suspend (u32 state); | 3 | extern int acpi_suspend(u32 state); |
| 4 | 4 | ||
| 5 | extern void acpi_enable_wakeup_device_prep(u8 sleep_state); | 5 | extern void acpi_enable_wakeup_device_prep(u8 sleep_state); |
| 6 | extern void acpi_enable_wakeup_device(u8 sleep_state); | 6 | extern void acpi_enable_wakeup_device(u8 sleep_state); |
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 8a0ed2800e63..f336bca7c450 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c | |||
| @@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id, | |||
| 213 | unsigned long table_end; | 213 | unsigned long table_end; |
| 214 | acpi_size tbl_size; | 214 | acpi_size tbl_size; |
| 215 | 215 | ||
| 216 | if (acpi_disabled && !acpi_ht) | 216 | if (acpi_disabled) |
| 217 | return -ENODEV; | 217 | return -ENODEV; |
| 218 | 218 | ||
| 219 | if (!handler) | 219 | if (!handler) |
| @@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) | |||
| 280 | struct acpi_table_header *table = NULL; | 280 | struct acpi_table_header *table = NULL; |
| 281 | acpi_size tbl_size; | 281 | acpi_size tbl_size; |
| 282 | 282 | ||
| 283 | if (acpi_disabled && !acpi_ht) | 283 | if (acpi_disabled) |
| 284 | return -ENODEV; | 284 | return -ENODEV; |
| 285 | 285 | ||
| 286 | if (!handler) | 286 | if (!handler) |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a0c93b321482..9865d46f49a8 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <acpi/acpi_bus.h> | 45 | #include <acpi/acpi_bus.h> |
| 46 | #include <acpi/acpi_drivers.h> | 46 | #include <acpi/acpi_drivers.h> |
| 47 | #include <linux/suspend.h> | 47 | #include <linux/suspend.h> |
| 48 | #include <acpi/video.h> | ||
| 48 | 49 | ||
| 49 | #define PREFIX "ACPI: " | 50 | #define PREFIX "ACPI: " |
| 50 | 51 | ||
| @@ -65,11 +66,6 @@ | |||
| 65 | 66 | ||
| 66 | #define MAX_NAME_LEN 20 | 67 | #define MAX_NAME_LEN 20 |
| 67 | 68 | ||
| 68 | #define ACPI_VIDEO_DISPLAY_CRT 1 | ||
| 69 | #define ACPI_VIDEO_DISPLAY_TV 2 | ||
| 70 | #define ACPI_VIDEO_DISPLAY_DVI 3 | ||
| 71 | #define ACPI_VIDEO_DISPLAY_LCD 4 | ||
| 72 | |||
| 73 | #define _COMPONENT ACPI_VIDEO_COMPONENT | 69 | #define _COMPONENT ACPI_VIDEO_COMPONENT |
| 74 | ACPI_MODULE_NAME("video"); | 70 | ACPI_MODULE_NAME("video"); |
| 75 | 71 | ||
| @@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
| 1007 | result = acpi_video_init_brightness(device); | 1003 | result = acpi_video_init_brightness(device); |
| 1008 | if (result) | 1004 | if (result) |
| 1009 | return; | 1005 | return; |
| 1010 | name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); | 1006 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); |
| 1011 | if (!name) | 1007 | if (!name) |
| 1012 | return; | 1008 | return; |
| 1009 | count++; | ||
| 1013 | 1010 | ||
| 1014 | sprintf(name, "acpi_video%d", count++); | ||
| 1015 | memset(&props, 0, sizeof(struct backlight_properties)); | 1011 | memset(&props, 0, sizeof(struct backlight_properties)); |
| 1016 | props.max_brightness = device->brightness->count - 3; | 1012 | props.max_brightness = device->brightness->count - 3; |
| 1017 | device->backlight = backlight_device_register(name, NULL, device, | 1013 | device->backlight = backlight_device_register(name, NULL, device, |
| @@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
| 1067 | if (device->cap._DCS && device->cap._DSS) { | 1063 | if (device->cap._DCS && device->cap._DSS) { |
| 1068 | static int count; | 1064 | static int count; |
| 1069 | char *name; | 1065 | char *name; |
| 1070 | name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); | 1066 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); |
| 1071 | if (!name) | 1067 | if (!name) |
| 1072 | return; | 1068 | return; |
| 1073 | sprintf(name, "acpi_video%d", count++); | 1069 | count++; |
| 1074 | device->output_dev = video_output_register(name, | 1070 | device->output_dev = video_output_register(name, |
| 1075 | NULL, device, &acpi_output_properties); | 1071 | NULL, device, &acpi_output_properties); |
| 1076 | kfree(name); | 1072 | kfree(name); |
| @@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id | |||
| 1748 | } | 1744 | } |
| 1749 | 1745 | ||
| 1750 | static int | 1746 | static int |
| 1747 | acpi_video_get_device_type(struct acpi_video_bus *video, | ||
| 1748 | unsigned long device_id) | ||
| 1749 | { | ||
| 1750 | struct acpi_video_enumerated_device *ids; | ||
| 1751 | int i; | ||
| 1752 | |||
| 1753 | for (i = 0; i < video->attached_count; i++) { | ||
| 1754 | ids = &video->attached_array[i]; | ||
| 1755 | if ((ids->value.int_val & 0xffff) == device_id) | ||
| 1756 | return ids->value.int_val; | ||
| 1757 | } | ||
| 1758 | |||
| 1759 | return 0; | ||
| 1760 | } | ||
| 1761 | |||
| 1762 | static int | ||
| 1751 | acpi_video_bus_get_one_device(struct acpi_device *device, | 1763 | acpi_video_bus_get_one_device(struct acpi_device *device, |
| 1752 | struct acpi_video_bus *video) | 1764 | struct acpi_video_bus *video) |
| 1753 | { | 1765 | { |
| 1754 | unsigned long long device_id; | 1766 | unsigned long long device_id; |
| 1755 | int status; | 1767 | int status, device_type; |
| 1756 | struct acpi_video_device *data; | 1768 | struct acpi_video_device *data; |
| 1757 | struct acpi_video_device_attrib* attribute; | 1769 | struct acpi_video_device_attrib* attribute; |
| 1758 | 1770 | ||
| @@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device, | |||
| 1797 | } | 1809 | } |
| 1798 | if(attribute->bios_can_detect) | 1810 | if(attribute->bios_can_detect) |
| 1799 | data->flags.bios = 1; | 1811 | data->flags.bios = 1; |
| 1800 | } else | 1812 | } else { |
| 1801 | data->flags.unknown = 1; | 1813 | /* Check for legacy IDs */ |
| 1814 | device_type = acpi_video_get_device_type(video, | ||
| 1815 | device_id); | ||
| 1816 | /* Ignore bits 16 and 18-20 */ | ||
| 1817 | switch (device_type & 0xffe2ffff) { | ||
| 1818 | case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR: | ||
| 1819 | data->flags.crt = 1; | ||
| 1820 | break; | ||
| 1821 | case ACPI_VIDEO_DISPLAY_LEGACY_PANEL: | ||
| 1822 | data->flags.lcd = 1; | ||
| 1823 | break; | ||
| 1824 | case ACPI_VIDEO_DISPLAY_LEGACY_TV: | ||
| 1825 | data->flags.tvout = 1; | ||
| 1826 | break; | ||
| 1827 | default: | ||
| 1828 | data->flags.unknown = 1; | ||
| 1829 | } | ||
| 1830 | } | ||
| 1802 | 1831 | ||
| 1803 | acpi_video_device_bind(video, data); | 1832 | acpi_video_device_bind(video, data); |
| 1804 | acpi_video_device_find_cap(data); | 1833 | acpi_video_device_find_cap(data); |
| @@ -2032,6 +2061,71 @@ out: | |||
| 2032 | return result; | 2061 | return result; |
| 2033 | } | 2062 | } |
| 2034 | 2063 | ||
| 2064 | int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, | ||
| 2065 | void **edid) | ||
| 2066 | { | ||
| 2067 | struct acpi_video_bus *video; | ||
| 2068 | struct acpi_video_device *video_device; | ||
| 2069 | union acpi_object *buffer = NULL; | ||
| 2070 | acpi_status status; | ||
| 2071 | int i, length; | ||
| 2072 | |||
| 2073 | if (!device || !acpi_driver_data(device)) | ||
| 2074 | return -EINVAL; | ||
| 2075 | |||
| 2076 | video = acpi_driver_data(device); | ||
| 2077 | |||
| 2078 | for (i = 0; i < video->attached_count; i++) { | ||
| 2079 | video_device = video->attached_array[i].bind_info; | ||
| 2080 | length = 256; | ||
| 2081 | |||
| 2082 | if (!video_device) | ||
| 2083 | continue; | ||
| 2084 | |||
| 2085 | if (type) { | ||
| 2086 | switch (type) { | ||
| 2087 | case ACPI_VIDEO_DISPLAY_CRT: | ||
| 2088 | if (!video_device->flags.crt) | ||
| 2089 | continue; | ||
| 2090 | break; | ||
| 2091 | case ACPI_VIDEO_DISPLAY_TV: | ||
| 2092 | if (!video_device->flags.tvout) | ||
| 2093 | continue; | ||
| 2094 | break; | ||
| 2095 | case ACPI_VIDEO_DISPLAY_DVI: | ||
| 2096 | if (!video_device->flags.dvi) | ||
| 2097 | continue; | ||
| 2098 | break; | ||
| 2099 | case ACPI_VIDEO_DISPLAY_LCD: | ||
| 2100 | if (!video_device->flags.lcd) | ||
| 2101 | continue; | ||
| 2102 | break; | ||
| 2103 | } | ||
| 2104 | } else if (video_device->device_id != device_id) { | ||
| 2105 | continue; | ||
| 2106 | } | ||
| 2107 | |||
| 2108 | status = acpi_video_device_EDID(video_device, &buffer, length); | ||
| 2109 | |||
| 2110 | if (ACPI_FAILURE(status) || !buffer || | ||
| 2111 | buffer->type != ACPI_TYPE_BUFFER) { | ||
| 2112 | length = 128; | ||
| 2113 | status = acpi_video_device_EDID(video_device, &buffer, | ||
| 2114 | length); | ||
| 2115 | if (ACPI_FAILURE(status) || !buffer || | ||
| 2116 | buffer->type != ACPI_TYPE_BUFFER) { | ||
| 2117 | continue; | ||
| 2118 | } | ||
| 2119 | } | ||
| 2120 | |||
| 2121 | *edid = buffer->buffer.pointer; | ||
| 2122 | return length; | ||
| 2123 | } | ||
| 2124 | |||
| 2125 | return -ENODEV; | ||
| 2126 | } | ||
| 2127 | EXPORT_SYMBOL(acpi_video_get_edid); | ||
| 2128 | |||
| 2035 | static int | 2129 | static int |
| 2036 | acpi_video_bus_get_devices(struct acpi_video_bus *video, | 2130 | acpi_video_bus_get_devices(struct acpi_video_bus *video, |
| 2037 | struct acpi_device *device) | 2131 | struct acpi_device *device) |
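The hunk above exports a new helper, acpi_video_get_edid(), which walks the displays attached to an ACPI video bus and hands back a 128- or 256-byte EDID blob. A minimal sketch of a caller follows, assuming the acpi_device of the video bus is already at hand and that ACPI_VIDEO_DISPLAY_LCD is now provided by <acpi/video.h> (where the local defines removed above were presumably moved); example_get_lcd_edid() and its error handling are illustrative, not taken from this patch.

#include <linux/acpi.h>
#include <linux/printk.h>
#include <acpi/video.h>

/* Sketch: ask the ACPI video bus for the EDID of its LCD panel.
 * "adev" is assumed to be the video bus acpi_device obtained by the
 * graphics driver; this is not the in-tree caller. */
static int example_get_lcd_edid(struct acpi_device *adev)
{
	void *edid;
	int len;

	/* a non-zero type selects by display class; device_id is only
	 * consulted when type == 0 */
	len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
	if (len < 0)
		return len;		/* -EINVAL or -ENODEV */

	/* "edid" points at the raw EDID bytes returned by _DDC */
	pr_info("found %d-byte EDID block\n", len);
	return 0;
}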
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index fc2f26b9b407..c5fef01b3c95 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
| @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str) | |||
| 250 | ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; | 250 | ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; |
| 251 | if (!strcmp("video", str)) | 251 | if (!strcmp("video", str)) |
| 252 | acpi_video_support |= | 252 | acpi_video_support |= |
| 253 | ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; | 253 | ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; |
| 254 | } | 254 | } |
| 255 | return 1; | 255 | return 1; |
| 256 | } | 256 | } |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index e68541f662b9..73f883333a0d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
| @@ -57,6 +57,8 @@ config SATA_PMP | |||
| 57 | This option adds support for SATA Port Multipliers | 57 | This option adds support for SATA Port Multipliers |
| 58 | (the SATA version of an ethernet hub, or SAS expander). | 58 | (the SATA version of an ethernet hub, or SAS expander). |
| 59 | 59 | ||
| 60 | comment "Controllers with non-SFF native interface" | ||
| 61 | |||
| 60 | config SATA_AHCI | 62 | config SATA_AHCI |
| 61 | tristate "AHCI SATA support" | 63 | tristate "AHCI SATA support" |
| 62 | depends on PCI | 64 | depends on PCI |
| @@ -73,11 +75,12 @@ config SATA_AHCI_PLATFORM | |||
| 73 | 75 | ||
| 74 | If unsure, say N. | 76 | If unsure, say N. |
| 75 | 77 | ||
| 76 | config SATA_SIL24 | 78 | config SATA_FSL |
| 77 | tristate "Silicon Image 3124/3132 SATA support" | 79 | tristate "Freescale 3.0Gbps SATA support" |
| 78 | depends on PCI | 80 | depends on FSL_SOC |
| 79 | help | 81 | help |
| 80 | This option enables support for Silicon Image 3124/3132 Serial ATA. | 82 | This option enables support for Freescale 3.0Gbps SATA controller. |
| 83 | It can be found on MPC837x and MPC8315. | ||
| 81 | 84 | ||
| 82 | If unsure, say N. | 85 | If unsure, say N. |
| 83 | 86 | ||
| @@ -87,12 +90,11 @@ config SATA_INIC162X | |||
| 87 | help | 90 | help |
| 88 | This option enables support for Initio 162x Serial ATA. | 91 | This option enables support for Initio 162x Serial ATA. |
| 89 | 92 | ||
| 90 | config SATA_FSL | 93 | config SATA_SIL24 |
| 91 | tristate "Freescale 3.0Gbps SATA support" | 94 | tristate "Silicon Image 3124/3132 SATA support" |
| 92 | depends on FSL_SOC | 95 | depends on PCI |
| 93 | help | 96 | help |
| 94 | This option enables support for Freescale 3.0Gbps SATA controller. | 97 | This option enables support for Silicon Image 3124/3132 Serial ATA. |
| 95 | It can be found on MPC837x and MPC8315. | ||
| 96 | 98 | ||
| 97 | If unsure, say N. | 99 | If unsure, say N. |
| 98 | 100 | ||
| @@ -116,15 +118,65 @@ config ATA_SFF | |||
| 116 | 118 | ||
| 117 | if ATA_SFF | 119 | if ATA_SFF |
| 118 | 120 | ||
| 119 | config SATA_SVW | 121 | comment "SFF controllers with custom DMA interface" |
| 120 | tristate "ServerWorks Frodo / Apple K2 SATA support" | 122 | |
| 123 | config PDC_ADMA | ||
| 124 | tristate "Pacific Digital ADMA support" | ||
| 121 | depends on PCI | 125 | depends on PCI |
| 122 | help | 126 | help |
| 123 | This option enables support for Broadcom/Serverworks/Apple K2 | 127 | This option enables support for Pacific Digital ADMA controllers |
| 124 | SATA support. | 128 | |
| 129 | If unsure, say N. | ||
| 130 | |||
| 131 | config PATA_MPC52xx | ||
| 132 | tristate "Freescale MPC52xx SoC internal IDE" | ||
| 133 | depends on PPC_MPC52xx && PPC_BESTCOMM | ||
| 134 | select PPC_BESTCOMM_ATA | ||
| 135 | help | ||
| 136 | This option enables support for integrated IDE controller | ||
| 137 | of the Freescale MPC52xx SoC. | ||
| 138 | |||
| 139 | If unsure, say N. | ||
| 140 | |||
| 141 | config PATA_OCTEON_CF | ||
| 142 | tristate "OCTEON Boot Bus Compact Flash support" | ||
| 143 | depends on CPU_CAVIUM_OCTEON | ||
| 144 | help | ||
| 145 | This option enables a polled compact flash driver for use with | ||
| 146 | compact flash cards attached to the OCTEON boot bus. | ||
| 147 | |||
| 148 | If unsure, say N. | ||
| 149 | |||
| 150 | config SATA_QSTOR | ||
| 151 | tristate "Pacific Digital SATA QStor support" | ||
| 152 | depends on PCI | ||
| 153 | help | ||
| 154 | This option enables support for Pacific Digital Serial ATA QStor. | ||
| 155 | |||
| 156 | If unsure, say N. | ||
| 157 | |||
| 158 | config SATA_SX4 | ||
| 159 | tristate "Promise SATA SX4 support (Experimental)" | ||
| 160 | depends on PCI && EXPERIMENTAL | ||
| 161 | help | ||
| 162 | This option enables support for Promise Serial ATA SX4. | ||
| 125 | 163 | ||
| 126 | If unsure, say N. | 164 | If unsure, say N. |
| 127 | 165 | ||
| 166 | config ATA_BMDMA | ||
| 167 | bool "ATA BMDMA support" | ||
| 168 | default y | ||
| 169 | help | ||
| 170 | This option adds support for SFF ATA controllers with BMDMA | ||
| 171 | capability. BMDMA stands for bus-master DMA and is the | ||
| 172 | de-facto DMA interface for SFF controllers. | ||
| 173 | |||
| 174 | If unsure, say Y. | ||
| 175 | |||
| 176 | if ATA_BMDMA | ||
| 177 | |||
| 178 | comment "SATA SFF controllers with BMDMA" | ||
| 179 | |||
| 128 | config ATA_PIIX | 180 | config ATA_PIIX |
| 129 | tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" | 181 | tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" |
| 130 | depends on PCI | 182 | depends on PCI |
| @@ -152,22 +204,6 @@ config SATA_NV | |||
| 152 | 204 | ||
| 153 | If unsure, say N. | 205 | If unsure, say N. |
| 154 | 206 | ||
| 155 | config PDC_ADMA | ||
| 156 | tristate "Pacific Digital ADMA support" | ||
| 157 | depends on PCI | ||
| 158 | help | ||
| 159 | This option enables support for Pacific Digital ADMA controllers | ||
| 160 | |||
| 161 | If unsure, say N. | ||
| 162 | |||
| 163 | config SATA_QSTOR | ||
| 164 | tristate "Pacific Digital SATA QStor support" | ||
| 165 | depends on PCI | ||
| 166 | help | ||
| 167 | This option enables support for Pacific Digital Serial ATA QStor. | ||
| 168 | |||
| 169 | If unsure, say N. | ||
| 170 | |||
| 171 | config SATA_PROMISE | 207 | config SATA_PROMISE |
| 172 | tristate "Promise SATA TX2/TX4 support" | 208 | tristate "Promise SATA TX2/TX4 support" |
| 173 | depends on PCI | 209 | depends on PCI |
| @@ -176,14 +212,6 @@ config SATA_PROMISE | |||
| 176 | 212 | ||
| 177 | If unsure, say N. | 213 | If unsure, say N. |
| 178 | 214 | ||
| 179 | config SATA_SX4 | ||
| 180 | tristate "Promise SATA SX4 support (Experimental)" | ||
| 181 | depends on PCI && EXPERIMENTAL | ||
| 182 | help | ||
| 183 | This option enables support for Promise Serial ATA SX4. | ||
| 184 | |||
| 185 | If unsure, say N. | ||
| 186 | |||
| 187 | config SATA_SIL | 215 | config SATA_SIL |
| 188 | tristate "Silicon Image SATA support" | 216 | tristate "Silicon Image SATA support" |
| 189 | depends on PCI | 217 | depends on PCI |
| @@ -203,6 +231,15 @@ config SATA_SIS | |||
| 203 | enable the PATA_SIS driver in the config. | 231 | enable the PATA_SIS driver in the config. |
| 204 | If unsure, say N. | 232 | If unsure, say N. |
| 205 | 233 | ||
| 234 | config SATA_SVW | ||
| 235 | tristate "ServerWorks Frodo / Apple K2 SATA support" | ||
| 236 | depends on PCI | ||
| 237 | help | ||
| 238 | This option enables support for Broadcom/Serverworks/Apple K2 | ||
| 239 | SATA support. | ||
| 240 | |||
| 241 | If unsure, say N. | ||
| 242 | |||
| 206 | config SATA_ULI | 243 | config SATA_ULI |
| 207 | tristate "ULi Electronics SATA support" | 244 | tristate "ULi Electronics SATA support" |
| 208 | depends on PCI | 245 | depends on PCI |
| @@ -227,14 +264,7 @@ config SATA_VITESSE | |||
| 227 | 264 | ||
| 228 | If unsure, say N. | 265 | If unsure, say N. |
| 229 | 266 | ||
| 230 | config PATA_ACPI | 267 | comment "PATA SFF controllers with BMDMA" |
| 231 | tristate "ACPI firmware driver for PATA" | ||
| 232 | depends on ATA_ACPI | ||
| 233 | help | ||
| 234 | This option enables an ACPI method driver which drives | ||
| 235 | motherboard PATA controller interfaces through the ACPI | ||
| 236 | firmware in the BIOS. This driver can sometimes handle | ||
| 237 | otherwise unsupported hardware. | ||
| 238 | 268 | ||
| 239 | config PATA_ALI | 269 | config PATA_ALI |
| 240 | tristate "ALi PATA support" | 270 | tristate "ALi PATA support" |
| @@ -262,40 +292,30 @@ config PATA_ARTOP | |||
| 262 | 292 | ||
| 263 | If unsure, say N. | 293 | If unsure, say N. |
| 264 | 294 | ||
| 265 | config PATA_ATP867X | 295 | config PATA_ATIIXP |
| 266 | tristate "ARTOP/Acard ATP867X PATA support" | 296 | tristate "ATI PATA support" |
| 267 | depends on PCI | 297 | depends on PCI |
| 268 | help | 298 | help |
| 269 | This option enables support for ARTOP/Acard ATP867X PATA | 299 | This option enables support for the ATI ATA interfaces |
| 270 | controllers. | 300 | found on the many ATI chipsets. |
| 271 | |||
| 272 | If unsure, say N. | ||
| 273 | |||
| 274 | config PATA_AT32 | ||
| 275 | tristate "Atmel AVR32 PATA support (Experimental)" | ||
| 276 | depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL | ||
| 277 | help | ||
| 278 | This option enables support for the IDE devices on the | ||
| 279 | Atmel AT32AP platform. | ||
| 280 | 301 | ||
| 281 | If unsure, say N. | 302 | If unsure, say N. |
| 282 | 303 | ||
| 283 | config PATA_ATIIXP | 304 | config PATA_ATP867X |
| 284 | tristate "ATI PATA support" | 305 | tristate "ARTOP/Acard ATP867X PATA support" |
| 285 | depends on PCI | 306 | depends on PCI |
| 286 | help | 307 | help |
| 287 | This option enables support for the ATI ATA interfaces | 308 | This option enables support for ARTOP/Acard ATP867X PATA |
| 288 | found on the many ATI chipsets. | 309 | controllers. |
| 289 | 310 | ||
| 290 | If unsure, say N. | 311 | If unsure, say N. |
| 291 | 312 | ||
| 292 | config PATA_CMD640_PCI | 313 | config PATA_BF54X |
| 293 | tristate "CMD640 PCI PATA support (Experimental)" | 314 | tristate "Blackfin 54x ATAPI support" |
| 294 | depends on PCI && EXPERIMENTAL | 315 | depends on BF542 || BF548 || BF549 |
| 295 | help | 316 | help |
| 296 | This option enables support for the CMD640 PCI IDE | 317 | This option enables support for the built-in ATAPI controller on |
| 297 | interface chip. Only the primary channel is currently | 318 | Blackfin 54x family chips. |
| 298 | supported. | ||
| 299 | 319 | ||
| 300 | If unsure, say N. | 320 | If unsure, say N. |
| 301 | 321 | ||
| @@ -362,15 +382,6 @@ config PATA_EFAR | |||
| 362 | 382 | ||
| 363 | If unsure, say N. | 383 | If unsure, say N. |
| 364 | 384 | ||
| 365 | config ATA_GENERIC | ||
| 366 | tristate "Generic ATA support" | ||
| 367 | depends on PCI | ||
| 368 | help | ||
| 369 | This option enables support for generic BIOS configured | ||
| 370 | ATA controllers via the new ATA layer | ||
| 371 | |||
| 372 | If unsure, say N. | ||
| 373 | |||
| 374 | config PATA_HPT366 | 385 | config PATA_HPT366 |
| 375 | tristate "HPT 366/368 PATA support" | 386 | tristate "HPT 366/368 PATA support" |
| 376 | depends on PCI | 387 | depends on PCI |
| @@ -415,12 +426,20 @@ config PATA_HPT3X3_DMA | |||
| 415 | controllers. Enable with care as there are still some | 426 | controllers. Enable with care as there are still some |
| 416 | problems with DMA on this chipset. | 427 | problems with DMA on this chipset. |
| 417 | 428 | ||
| 418 | config PATA_ISAPNP | 429 | config PATA_ICSIDE |
| 419 | tristate "ISA Plug and Play PATA support" | 430 | tristate "Acorn ICS PATA support" |
| 420 | depends on ISAPNP | 431 | depends on ARM && ARCH_ACORN |
| 421 | help | 432 | help |
| 422 | This option enables support for ISA plug & play ATA | 433 | On Acorn systems, say Y here if you wish to use the ICS PATA |
| 423 | controllers such as those found on old soundcards. | 434 | interface card. This is not required for ICS partition support. |
| 435 | If you are unsure, say N to this. | ||
| 436 | |||
| 437 | config PATA_IT8213 | ||
| 438 | tristate "IT8213 PATA support (Experimental)" | ||
| 439 | depends on PCI && EXPERIMENTAL | ||
| 440 | help | ||
| 441 | This option enables support for the ITE 821 PATA | ||
| 442 | controllers via the new ATA layer. | ||
| 424 | 443 | ||
| 425 | If unsure, say N. | 444 | If unsure, say N. |
| 426 | 445 | ||
| @@ -434,15 +453,6 @@ config PATA_IT821X | |||
| 434 | 453 | ||
| 435 | If unsure, say N. | 454 | If unsure, say N. |
| 436 | 455 | ||
| 437 | config PATA_IT8213 | ||
| 438 | tristate "IT8213 PATA support (Experimental)" | ||
| 439 | depends on PCI && EXPERIMENTAL | ||
| 440 | help | ||
| 441 | This option enables support for the ITE 821 PATA | ||
| 442 | controllers via the new ATA layer. | ||
| 443 | |||
| 444 | If unsure, say N. | ||
| 445 | |||
| 446 | config PATA_JMICRON | 456 | config PATA_JMICRON |
| 447 | tristate "JMicron PATA support" | 457 | tristate "JMicron PATA support" |
| 448 | depends on PCI | 458 | depends on PCI |
| @@ -452,23 +462,14 @@ config PATA_JMICRON | |||
| 452 | 462 | ||
| 453 | If unsure, say N. | 463 | If unsure, say N. |
| 454 | 464 | ||
| 455 | config PATA_LEGACY | 465 | config PATA_MACIO |
| 456 | tristate "Legacy ISA PATA support (Experimental)" | 466 | tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" |
| 457 | depends on (ISA || PCI) && EXPERIMENTAL | 467 | depends on PPC_PMAC |
| 458 | help | ||
| 459 | This option enables support for ISA/VLB/PCI bus legacy PATA | ||
| 460 | ports and allows them to be accessed via the new ATA layer. | ||
| 461 | |||
| 462 | If unsure, say N. | ||
| 463 | |||
| 464 | config PATA_TRIFLEX | ||
| 465 | tristate "Compaq Triflex PATA support" | ||
| 466 | depends on PCI | ||
| 467 | help | 468 | help |
| 468 | Enable support for the Compaq 'Triflex' IDE controller as found | 469 | Most IDE capable PowerMacs have IDE busses driven by a variant |
| 469 | on many Compaq Pentium-Pro systems, via the new ATA layer. | 470 | of this controller which is part of the Apple chipset used on |
| 470 | 471 | most PowerMac models. Some models have multiple busses using | |
| 471 | If unsure, say N. | 472 | different chipsets, though generally, MacIO is one of them. |
| 472 | 473 | ||
| 473 | config PATA_MARVELL | 474 | config PATA_MARVELL |
| 474 | tristate "Marvell PATA support via legacy mode" | 475 | tristate "Marvell PATA support via legacy mode" |
| @@ -481,32 +482,6 @@ config PATA_MARVELL | |||
| 481 | 482 | ||
| 482 | If unsure, say N. | 483 | If unsure, say N. |
| 483 | 484 | ||
| 484 | config PATA_MPC52xx | ||
| 485 | tristate "Freescale MPC52xx SoC internal IDE" | ||
| 486 | depends on PPC_MPC52xx && PPC_BESTCOMM | ||
| 487 | select PPC_BESTCOMM_ATA | ||
| 488 | help | ||
| 489 | This option enables support for integrated IDE controller | ||
| 490 | of the Freescale MPC52xx SoC. | ||
| 491 | |||
| 492 | If unsure, say N. | ||
| 493 | |||
| 494 | config PATA_MPIIX | ||
| 495 | tristate "Intel PATA MPIIX support" | ||
| 496 | depends on PCI | ||
| 497 | help | ||
| 498 | This option enables support for MPIIX PATA support. | ||
| 499 | |||
| 500 | If unsure, say N. | ||
| 501 | |||
| 502 | config PATA_OLDPIIX | ||
| 503 | tristate "Intel PATA old PIIX support" | ||
| 504 | depends on PCI | ||
| 505 | help | ||
| 506 | This option enables support for early PIIX PATA support. | ||
| 507 | |||
| 508 | If unsure, say N. | ||
| 509 | |||
| 510 | config PATA_NETCELL | 485 | config PATA_NETCELL |
| 511 | tristate "NETCELL Revolution RAID support" | 486 | tristate "NETCELL Revolution RAID support" |
| 512 | depends on PCI | 487 | depends on PCI |
| @@ -525,15 +500,6 @@ config PATA_NINJA32 | |||
| 525 | 500 | ||
| 526 | If unsure, say N. | 501 | If unsure, say N. |
| 527 | 502 | ||
| 528 | config PATA_NS87410 | ||
| 529 | tristate "Nat Semi NS87410 PATA support" | ||
| 530 | depends on PCI | ||
| 531 | help | ||
| 532 | This option enables support for the National Semiconductor | ||
| 533 | NS87410 PCI-IDE controller. | ||
| 534 | |||
| 535 | If unsure, say N. | ||
| 536 | |||
| 537 | config PATA_NS87415 | 503 | config PATA_NS87415 |
| 538 | tristate "Nat Semi NS87415 PATA support" | 504 | tristate "Nat Semi NS87415 PATA support" |
| 539 | depends on PCI | 505 | depends on PCI |
| @@ -543,12 +509,11 @@ config PATA_NS87415 | |||
| 543 | 509 | ||
| 544 | If unsure, say N. | 510 | If unsure, say N. |
| 545 | 511 | ||
| 546 | config PATA_OPTI | 512 | config PATA_OLDPIIX |
| 547 | tristate "OPTI621/6215 PATA support (Very Experimental)" | 513 | tristate "Intel PATA old PIIX support" |
| 548 | depends on PCI && EXPERIMENTAL | 514 | depends on PCI |
| 549 | help | 515 | help |
| 550 | This option enables full PIO support for the early Opti ATA | 516 | This option enables support for early PIIX PATA support. |
| 551 | controllers found on some old motherboards. | ||
| 552 | 517 | ||
| 553 | If unsure, say N. | 518 | If unsure, say N. |
| 554 | 519 | ||
| @@ -562,24 +527,6 @@ config PATA_OPTIDMA | |||
| 562 | 527 | ||
| 563 | If unsure, say N. | 528 | If unsure, say N. |
| 564 | 529 | ||
| 565 | config PATA_PALMLD | ||
| 566 | tristate "Palm LifeDrive PATA support" | ||
| 567 | depends on MACH_PALMLD | ||
| 568 | help | ||
| 569 | This option enables support for Palm LifeDrive's internal ATA | ||
| 570 | port via the new ATA layer. | ||
| 571 | |||
| 572 | If unsure, say N. | ||
| 573 | |||
| 574 | config PATA_PCMCIA | ||
| 575 | tristate "PCMCIA PATA support" | ||
| 576 | depends on PCMCIA | ||
| 577 | help | ||
| 578 | This option enables support for PCMCIA ATA interfaces, including | ||
| 579 | compact flash card adapters via the new ATA layer. | ||
| 580 | |||
| 581 | If unsure, say N. | ||
| 582 | |||
| 583 | config PATA_PDC2027X | 530 | config PATA_PDC2027X |
| 584 | tristate "Promise PATA 2027x support" | 531 | tristate "Promise PATA 2027x support" |
| 585 | depends on PCI | 532 | depends on PCI |
| @@ -597,12 +544,6 @@ config PATA_PDC_OLD | |||
| 597 | 544 | ||
| 598 | If unsure, say N. | 545 | If unsure, say N. |
| 599 | 546 | ||
| 600 | config PATA_QDI | ||
| 601 | tristate "QDI VLB PATA support" | ||
| 602 | depends on ISA | ||
| 603 | help | ||
| 604 | Support for QDI 6500 and 6580 PATA controllers on VESA local bus. | ||
| 605 | |||
| 606 | config PATA_RADISYS | 547 | config PATA_RADISYS |
| 607 | tristate "RADISYS 82600 PATA support (Experimental)" | 548 | tristate "RADISYS 82600 PATA support (Experimental)" |
| 608 | depends on PCI && EXPERIMENTAL | 549 | depends on PCI && EXPERIMENTAL |
| @@ -612,15 +553,6 @@ config PATA_RADISYS | |||
| 612 | 553 | ||
| 613 | If unsure, say N. | 554 | If unsure, say N. |
| 614 | 555 | ||
| 615 | config PATA_RB532 | ||
| 616 | tristate "RouterBoard 532 PATA CompactFlash support" | ||
| 617 | depends on MIKROTIK_RB532 | ||
| 618 | help | ||
| 619 | This option enables support for the RouterBoard 532 | ||
| 620 | PATA CompactFlash controller. | ||
| 621 | |||
| 622 | If unsure, say N. | ||
| 623 | |||
| 624 | config PATA_RDC | 556 | config PATA_RDC |
| 625 | tristate "RDC PATA support" | 557 | tristate "RDC PATA support" |
| 626 | depends on PCI | 558 | depends on PCI |
| @@ -631,21 +563,30 @@ config PATA_RDC | |||
| 631 | 563 | ||
| 632 | If unsure, say N. | 564 | If unsure, say N. |
| 633 | 565 | ||
| 634 | config PATA_RZ1000 | 566 | config PATA_SC1200 |
| 635 | tristate "PC Tech RZ1000 PATA support" | 567 | tristate "SC1200 PATA support" |
| 636 | depends on PCI | 568 | depends on PCI |
| 637 | help | 569 | help |
| 638 | This option enables basic support for the PC Tech RZ1000/1 | 570 | This option enables support for the NatSemi/AMD SC1200 SoC |
| 639 | PATA controllers via the new ATA layer | 571 | companion chip used with the Geode processor family. |
| 640 | 572 | ||
| 641 | If unsure, say N. | 573 | If unsure, say N. |
| 642 | 574 | ||
| 643 | config PATA_SC1200 | 575 | config PATA_SCC |
| 644 | tristate "SC1200 PATA support" | 576 | tristate "Toshiba's Cell Reference Set IDE support" |
| 577 | depends on PCI && PPC_CELLEB | ||
| 578 | help | ||
| 579 | This option enables support for the built-in IDE controller on | ||
| 580 | Toshiba Cell Reference Board. | ||
| 581 | |||
| 582 | If unsure, say N. | ||
| 583 | |||
| 584 | config PATA_SCH | ||
| 585 | tristate "Intel SCH PATA support" | ||
| 645 | depends on PCI | 586 | depends on PCI |
| 646 | help | 587 | help |
| 647 | This option enables support for the NatSemi/AMD SC1200 SoC | 588 | This option enables support for Intel SCH PATA on the Intel |
| 648 | companion chip used with the Geode processor family. | 589 | SCH (US15W, US15L, UL11L) series host controllers. |
| 649 | 590 | ||
| 650 | If unsure, say N. | 591 | If unsure, say N. |
| 651 | 592 | ||
| @@ -683,6 +624,15 @@ config PATA_TOSHIBA | |||
| 683 | 624 | ||
| 684 | If unsure, say N. | 625 | If unsure, say N. |
| 685 | 626 | ||
| 627 | config PATA_TRIFLEX | ||
| 628 | tristate "Compaq Triflex PATA support" | ||
| 629 | depends on PCI | ||
| 630 | help | ||
| 631 | Enable support for the Compaq 'Triflex' IDE controller as found | ||
| 632 | on many Compaq Pentium-Pro systems, via the new ATA layer. | ||
| 633 | |||
| 634 | If unsure, say N. | ||
| 635 | |||
| 686 | config PATA_VIA | 636 | config PATA_VIA |
| 687 | tristate "VIA PATA support" | 637 | tristate "VIA PATA support" |
| 688 | depends on PCI | 638 | depends on PCI |
| @@ -701,12 +651,99 @@ config PATA_WINBOND | |||
| 701 | 651 | ||
| 702 | If unsure, say N. | 652 | If unsure, say N. |
| 703 | 653 | ||
| 704 | config PATA_WINBOND_VLB | 654 | endif # ATA_BMDMA |
| 705 | tristate "Winbond W83759A VLB PATA support (Experimental)" | 655 | |
| 706 | depends on ISA && EXPERIMENTAL | 656 | comment "PIO-only SFF controllers" |
| 657 | |||
| 658 | config PATA_AT32 | ||
| 659 | tristate "Atmel AVR32 PATA support (Experimental)" | ||
| 660 | depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL | ||
| 707 | help | 661 | help |
| 708 | Support for the Winbond W83759A controller on Vesa Local Bus | 662 | This option enables support for the IDE devices on the |
| 709 | systems. | 663 | Atmel AT32AP platform. |
| 664 | |||
| 665 | If unsure, say N. | ||
| 666 | |||
| 667 | config PATA_AT91 | ||
| 668 | tristate "PATA support for AT91SAM9260" | ||
| 669 | depends on ARM && ARCH_AT91 | ||
| 670 | help | ||
| 671 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. | ||
| 672 | |||
| 673 | If unsure, say N. | ||
| 674 | |||
| 675 | config PATA_CMD640_PCI | ||
| 676 | tristate "CMD640 PCI PATA support (Experimental)" | ||
| 677 | depends on PCI && EXPERIMENTAL | ||
| 678 | help | ||
| 679 | This option enables support for the CMD640 PCI IDE | ||
| 680 | interface chip. Only the primary channel is currently | ||
| 681 | supported. | ||
| 682 | |||
| 683 | If unsure, say N. | ||
| 684 | |||
| 685 | config PATA_ISAPNP | ||
| 686 | tristate "ISA Plug and Play PATA support" | ||
| 687 | depends on ISAPNP | ||
| 688 | help | ||
| 689 | This option enables support for ISA plug & play ATA | ||
| 690 | controllers such as those found on old soundcards. | ||
| 691 | |||
| 692 | If unsure, say N. | ||
| 693 | |||
| 694 | config PATA_IXP4XX_CF | ||
| 695 | tristate "IXP4XX Compact Flash support" | ||
| 696 | depends on ARCH_IXP4XX | ||
| 697 | help | ||
| 698 | This option enables support for a Compact Flash connected on | ||
| 699 | the ixp4xx expansion bus. This driver had been written for | ||
| 700 | Loft/Avila boards in mind but can work with others. | ||
| 701 | |||
| 702 | If unsure, say N. | ||
| 703 | |||
| 704 | config PATA_MPIIX | ||
| 705 | tristate "Intel PATA MPIIX support" | ||
| 706 | depends on PCI | ||
| 707 | help | ||
| 708 | This option enables support for MPIIX PATA support. | ||
| 709 | |||
| 710 | If unsure, say N. | ||
| 711 | |||
| 712 | config PATA_NS87410 | ||
| 713 | tristate "Nat Semi NS87410 PATA support" | ||
| 714 | depends on PCI | ||
| 715 | help | ||
| 716 | This option enables support for the National Semiconductor | ||
| 717 | NS87410 PCI-IDE controller. | ||
| 718 | |||
| 719 | If unsure, say N. | ||
| 720 | |||
| 721 | config PATA_OPTI | ||
| 722 | tristate "OPTI621/6215 PATA support (Very Experimental)" | ||
| 723 | depends on PCI && EXPERIMENTAL | ||
| 724 | help | ||
| 725 | This option enables full PIO support for the early Opti ATA | ||
| 726 | controllers found on some old motherboards. | ||
| 727 | |||
| 728 | If unsure, say N. | ||
| 729 | |||
| 730 | config PATA_PALMLD | ||
| 731 | tristate "Palm LifeDrive PATA support" | ||
| 732 | depends on MACH_PALMLD | ||
| 733 | help | ||
| 734 | This option enables support for Palm LifeDrive's internal ATA | ||
| 735 | port via the new ATA layer. | ||
| 736 | |||
| 737 | If unsure, say N. | ||
| 738 | |||
| 739 | config PATA_PCMCIA | ||
| 740 | tristate "PCMCIA PATA support" | ||
| 741 | depends on PCMCIA | ||
| 742 | help | ||
| 743 | This option enables support for PCMCIA ATA interfaces, including | ||
| 744 | compact flash card adapters via the new ATA layer. | ||
| 745 | |||
| 746 | If unsure, say N. | ||
| 710 | 747 | ||
| 711 | config HAVE_PATA_PLATFORM | 748 | config HAVE_PATA_PLATFORM |
| 712 | bool | 749 | bool |
| @@ -725,14 +762,6 @@ config PATA_PLATFORM | |||
| 725 | 762 | ||
| 726 | If unsure, say N. | 763 | If unsure, say N. |
| 727 | 764 | ||
| 728 | config PATA_AT91 | ||
| 729 | tristate "PATA support for AT91SAM9260" | ||
| 730 | depends on ARM && ARCH_AT91 | ||
| 731 | help | ||
| 732 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. | ||
| 733 | |||
| 734 | If unsure, say N. | ||
| 735 | |||
| 736 | config PATA_OF_PLATFORM | 765 | config PATA_OF_PLATFORM |
| 737 | tristate "OpenFirmware platform device PATA support" | 766 | tristate "OpenFirmware platform device PATA support" |
| 738 | depends on PATA_PLATFORM && PPC_OF | 767 | depends on PATA_PLATFORM && PPC_OF |
| @@ -743,69 +772,65 @@ config PATA_OF_PLATFORM | |||
| 743 | 772 | ||
| 744 | If unsure, say N. | 773 | If unsure, say N. |
| 745 | 774 | ||
| 746 | config PATA_ICSIDE | 775 | config PATA_QDI |
| 747 | tristate "Acorn ICS PATA support" | 776 | tristate "QDI VLB PATA support" |
| 748 | depends on ARM && ARCH_ACORN | 777 | depends on ISA |
| 749 | help | 778 | help |
| 750 | On Acorn systems, say Y here if you wish to use the ICS PATA | 779 | Support for QDI 6500 and 6580 PATA controllers on VESA local bus. |
| 751 | interface card. This is not required for ICS partition support. | ||
| 752 | If you are unsure, say N to this. | ||
| 753 | 780 | ||
| 754 | config PATA_IXP4XX_CF | 781 | config PATA_RB532 |
| 755 | tristate "IXP4XX Compact Flash support" | 782 | tristate "RouterBoard 532 PATA CompactFlash support" |
| 756 | depends on ARCH_IXP4XX | 783 | depends on MIKROTIK_RB532 |
| 757 | help | 784 | help |
| 758 | This option enables support for a Compact Flash connected on | 785 | This option enables support for the RouterBoard 532 |
| 759 | the ixp4xx expansion bus. This driver had been written for | 786 | PATA CompactFlash controller. |
| 760 | Loft/Avila boards in mind but can work with others. | ||
| 761 | 787 | ||
| 762 | If unsure, say N. | 788 | If unsure, say N. |
| 763 | 789 | ||
| 764 | config PATA_OCTEON_CF | 790 | config PATA_RZ1000 |
| 765 | tristate "OCTEON Boot Bus Compact Flash support" | 791 | tristate "PC Tech RZ1000 PATA support" |
| 766 | depends on CPU_CAVIUM_OCTEON | 792 | depends on PCI |
| 767 | help | 793 | help |
| 768 | This option enables a polled compact flash driver for use with | 794 | This option enables basic support for the PC Tech RZ1000/1 |
| 769 | compact flash cards attached to the OCTEON boot bus. | 795 | PATA controllers via the new ATA layer |
| 770 | 796 | ||
| 771 | If unsure, say N. | 797 | If unsure, say N. |
| 772 | 798 | ||
| 773 | config PATA_SCC | 799 | config PATA_WINBOND_VLB |
| 774 | tristate "Toshiba's Cell Reference Set IDE support" | 800 | tristate "Winbond W83759A VLB PATA support (Experimental)" |
| 775 | depends on PCI && PPC_CELLEB | 801 | depends on ISA && EXPERIMENTAL |
| 776 | help | 802 | help |
| 777 | This option enables support for the built-in IDE controller on | 803 | Support for the Winbond W83759A controller on Vesa Local Bus |
| 778 | Toshiba Cell Reference Board. | 804 | systems. |
| 779 | 805 | ||
| 780 | If unsure, say N. | 806 | comment "Generic fallback / legacy drivers" |
| 781 | 807 | ||
| 782 | config PATA_SCH | 808 | config PATA_ACPI |
| 783 | tristate "Intel SCH PATA support" | 809 | tristate "ACPI firmware driver for PATA" |
| 784 | depends on PCI | 810 | depends on ATA_ACPI && ATA_BMDMA |
| 785 | help | 811 | help |
| 786 | This option enables support for Intel SCH PATA on the Intel | 812 | This option enables an ACPI method driver which drives |
| 787 | SCH (US15W, US15L, UL11L) series host controllers. | 813 | motherboard PATA controller interfaces through the ACPI |
| 788 | 814 | firmware in the BIOS. This driver can sometimes handle | |
| 789 | If unsure, say N. | 815 | otherwise unsupported hardware. |
| 790 | 816 | ||
| 791 | config PATA_BF54X | 817 | config ATA_GENERIC |
| 792 | tristate "Blackfin 54x ATAPI support" | 818 | tristate "Generic ATA support" |
| 793 | depends on BF542 || BF548 || BF549 | 819 | depends on PCI && ATA_BMDMA |
| 794 | help | 820 | help |
| 795 | This option enables support for the built-in ATAPI controller on | 821 | This option enables support for generic BIOS configured |
| 796 | Blackfin 54x family chips. | 822 | ATA controllers via the new ATA layer |
| 797 | 823 | ||
| 798 | If unsure, say N. | 824 | If unsure, say N. |
| 799 | 825 | ||
| 800 | config PATA_MACIO | 826 | config PATA_LEGACY |
| 801 | tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" | 827 | tristate "Legacy ISA PATA support (Experimental)" |
| 802 | depends on PPC_PMAC | 828 | depends on (ISA || PCI) && EXPERIMENTAL |
| 803 | help | 829 | help |
| 804 | Most IDE capable PowerMacs have IDE busses driven by a variant | 830 | This option enables support for ISA/VLB/PCI bus legacy PATA |
| 805 | of this controller which is part of the Apple chipset used on | 831 | ports and allows them to be accessed via the new ATA layer. |
| 806 | most PowerMac models. Some models have multiple busses using | ||
| 807 | different chipsets, though generally, MacIO is one of them. | ||
| 808 | 832 | ||
| 833 | If unsure, say N. | ||
| 809 | 834 | ||
| 810 | endif # ATA_SFF | 835 | endif # ATA_SFF |
| 811 | endif # ATA | 836 | endif # ATA |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d0a93c4ad3ec..7ef89d73df63 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile | |||
| @@ -1,33 +1,39 @@ | |||
| 1 | 1 | ||
| 2 | obj-$(CONFIG_ATA) += libata.o | 2 | obj-$(CONFIG_ATA) += libata.o |
| 3 | 3 | ||
| 4 | # non-SFF interface | ||
| 4 | obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o | 5 | obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o |
| 5 | obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o | 6 | obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o |
| 6 | obj-$(CONFIG_SATA_SVW) += sata_svw.o | 7 | obj-$(CONFIG_SATA_FSL) += sata_fsl.o |
| 8 | obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o | ||
| 9 | obj-$(CONFIG_SATA_SIL24) += sata_sil24.o | ||
| 10 | |||
| 11 | # SFF w/ custom DMA | ||
| 12 | obj-$(CONFIG_PDC_ADMA) += pdc_adma.o | ||
| 13 | obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o | ||
| 14 | obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o | ||
| 15 | obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o | ||
| 16 | obj-$(CONFIG_SATA_SX4) += sata_sx4.o | ||
| 17 | |||
| 18 | # SFF SATA w/ BMDMA | ||
| 7 | obj-$(CONFIG_ATA_PIIX) += ata_piix.o | 19 | obj-$(CONFIG_ATA_PIIX) += ata_piix.o |
| 20 | obj-$(CONFIG_SATA_MV) += sata_mv.o | ||
| 21 | obj-$(CONFIG_SATA_NV) += sata_nv.o | ||
| 8 | obj-$(CONFIG_SATA_PROMISE) += sata_promise.o | 22 | obj-$(CONFIG_SATA_PROMISE) += sata_promise.o |
| 9 | obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o | ||
| 10 | obj-$(CONFIG_SATA_SIL) += sata_sil.o | 23 | obj-$(CONFIG_SATA_SIL) += sata_sil.o |
| 11 | obj-$(CONFIG_SATA_SIL24) += sata_sil24.o | ||
| 12 | obj-$(CONFIG_SATA_VIA) += sata_via.o | ||
| 13 | obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o | ||
| 14 | obj-$(CONFIG_SATA_SIS) += sata_sis.o | 24 | obj-$(CONFIG_SATA_SIS) += sata_sis.o |
| 15 | obj-$(CONFIG_SATA_SX4) += sata_sx4.o | 25 | obj-$(CONFIG_SATA_SVW) += sata_svw.o |
| 16 | obj-$(CONFIG_SATA_NV) += sata_nv.o | ||
| 17 | obj-$(CONFIG_SATA_ULI) += sata_uli.o | 26 | obj-$(CONFIG_SATA_ULI) += sata_uli.o |
| 18 | obj-$(CONFIG_SATA_MV) += sata_mv.o | 27 | obj-$(CONFIG_SATA_VIA) += sata_via.o |
| 19 | obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o | 28 | obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o |
| 20 | obj-$(CONFIG_PDC_ADMA) += pdc_adma.o | ||
| 21 | obj-$(CONFIG_SATA_FSL) += sata_fsl.o | ||
| 22 | obj-$(CONFIG_PATA_MACIO) += pata_macio.o | ||
| 23 | 29 | ||
| 30 | # SFF PATA w/ BMDMA | ||
| 24 | obj-$(CONFIG_PATA_ALI) += pata_ali.o | 31 | obj-$(CONFIG_PATA_ALI) += pata_ali.o |
| 25 | obj-$(CONFIG_PATA_AMD) += pata_amd.o | 32 | obj-$(CONFIG_PATA_AMD) += pata_amd.o |
| 26 | obj-$(CONFIG_PATA_ARTOP) += pata_artop.o | 33 | obj-$(CONFIG_PATA_ARTOP) += pata_artop.o |
| 27 | obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o | ||
| 28 | obj-$(CONFIG_PATA_AT32) += pata_at32.o | ||
| 29 | obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o | 34 | obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o |
| 30 | obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o | 35 | obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o |
| 36 | obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o | ||
| 31 | obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o | 37 | obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o |
| 32 | obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o | 38 | obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o |
| 33 | obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o | 39 | obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o |
| @@ -39,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o | |||
| 39 | obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o | 45 | obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o |
| 40 | obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o | 46 | obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o |
| 41 | obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o | 47 | obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o |
| 42 | obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o | 48 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o |
| 43 | obj-$(CONFIG_PATA_IT821X) += pata_it821x.o | ||
| 44 | obj-$(CONFIG_PATA_IT8213) += pata_it8213.o | 49 | obj-$(CONFIG_PATA_IT8213) += pata_it8213.o |
| 50 | obj-$(CONFIG_PATA_IT821X) += pata_it821x.o | ||
| 45 | obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o | 51 | obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o |
| 52 | obj-$(CONFIG_PATA_MACIO) += pata_macio.o | ||
| 53 | obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o | ||
| 46 | obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o | 54 | obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o |
| 47 | obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o | 55 | obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o |
| 48 | obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o | ||
| 49 | obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o | 56 | obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o |
| 50 | obj-$(CONFIG_PATA_OPTI) += pata_opti.o | ||
| 51 | obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o | ||
| 52 | obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o | ||
| 53 | obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o | ||
| 54 | obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o | ||
| 55 | obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o | 57 | obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o |
| 56 | obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o | 58 | obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o |
| 57 | obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o | ||
| 58 | obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o | 59 | obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o |
| 59 | obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o | 60 | obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o |
| 60 | obj-$(CONFIG_PATA_QDI) += pata_qdi.o | ||
| 61 | obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o | 61 | obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o |
| 62 | obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o | ||
| 63 | obj-$(CONFIG_PATA_RDC) += pata_rdc.o | 62 | obj-$(CONFIG_PATA_RDC) += pata_rdc.o |
| 64 | obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o | ||
| 65 | obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o | 63 | obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o |
| 64 | obj-$(CONFIG_PATA_SCC) += pata_scc.o | ||
| 65 | obj-$(CONFIG_PATA_SCH) += pata_sch.o | ||
| 66 | obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o | 66 | obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o |
| 67 | obj-$(CONFIG_PATA_SIL680) += pata_sil680.o | 67 | obj-$(CONFIG_PATA_SIL680) += pata_sil680.o |
| 68 | obj-$(CONFIG_PATA_SIS) += pata_sis.o | ||
| 68 | obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o | 69 | obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o |
| 70 | obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o | ||
| 69 | obj-$(CONFIG_PATA_VIA) += pata_via.o | 71 | obj-$(CONFIG_PATA_VIA) += pata_via.o |
| 70 | obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o | 72 | obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o |
| 71 | obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o | 73 | |
| 72 | obj-$(CONFIG_PATA_SIS) += pata_sis.o | 74 | # SFF PIO only |
| 73 | obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o | 75 | obj-$(CONFIG_PATA_AT32) += pata_at32.o |
| 76 | obj-$(CONFIG_PATA_AT91) += pata_at91.o | ||
| 77 | obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o | ||
| 78 | obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o | ||
| 74 | obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o | 79 | obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o |
| 75 | obj-$(CONFIG_PATA_SCC) += pata_scc.o | 80 | obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o |
| 76 | obj-$(CONFIG_PATA_SCH) += pata_sch.o | 81 | obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o |
| 77 | obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o | 82 | obj-$(CONFIG_PATA_OPTI) += pata_opti.o |
| 78 | obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o | 83 | obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o |
| 84 | obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o | ||
| 79 | obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o | 85 | obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o |
| 80 | obj-$(CONFIG_PATA_AT91) += pata_at91.o | ||
| 81 | obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o | 86 | obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o |
| 82 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o | 87 | obj-$(CONFIG_PATA_QDI) += pata_qdi.o |
| 88 | obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o | ||
| 89 | obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o | ||
| 90 | obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o | ||
| 91 | |||
| 83 | # Should be last but two libata driver | 92 | # Should be last but two libata driver |
| 84 | obj-$(CONFIG_PATA_ACPI) += pata_acpi.o | 93 | obj-$(CONFIG_PATA_ACPI) += pata_acpi.o |
| 85 | # Should be last but one libata driver | 94 | # Should be last but one libata driver |
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 33fb614f9784..573158a9668d 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c | |||
| @@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id | |||
| 155 | return rc; | 155 | return rc; |
| 156 | pcim_pin_device(dev); | 156 | pcim_pin_device(dev); |
| 157 | } | 157 | } |
| 158 | return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); | 158 | return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0); |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static struct pci_device_id ata_generic[] = { | 161 | static struct pci_device_id ata_generic[] = { |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ec52fc618763..7409f98d2ae6 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
| @@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, | |||
| 1589 | hpriv->map = piix_init_sata_map(pdev, port_info, | 1589 | hpriv->map = piix_init_sata_map(pdev, port_info, |
| 1590 | piix_map_db_table[ent->driver_data]); | 1590 | piix_map_db_table[ent->driver_data]); |
| 1591 | 1591 | ||
| 1592 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 1592 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
| 1593 | if (rc) | 1593 | if (rc) |
| 1594 | return rc; | 1594 | return rc; |
| 1595 | host->private_data = hpriv; | 1595 | host->private_data = hpriv; |
| @@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, | |||
| 1626 | host->flags |= ATA_HOST_PARALLEL_SCAN; | 1626 | host->flags |= ATA_HOST_PARALLEL_SCAN; |
| 1627 | 1627 | ||
| 1628 | pci_set_master(pdev); | 1628 | pci_set_master(pdev); |
| 1629 | return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); | 1629 | return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); |
| 1630 | } | 1630 | } |
| 1631 | 1631 | ||
| 1632 | static void piix_remove_one(struct pci_dev *pdev) | 1632 | static void piix_remove_one(struct pci_dev *pdev) |
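Both conversions above follow the same pattern: drivers for BMDMA-capable PCI controllers now go through the bmdma-specific helpers and register ata_bmdma_interrupt instead of the plain SFF entry points; ata_generic takes the one-shot ata_pci_bmdma_init_one() path, while ata_piix uses the longer prepare_host()/activate_host() route but ends up at the same handler. A hedged sketch of the short probe pattern for a hypothetical driver; example_sht, example_port_ops, example_port_info and example_init_one are made-up names, only the libata symbols they reference are assumed to exist in this tree.

#include <linux/pci.h>
#include <linux/libata.h>

/* Assumed driver-provided objects - illustrative only. */
static struct scsi_host_template example_sht = {
	ATA_BMDMA_SHT("example"),
};

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
};

static const struct ata_port_info example_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
	.port_ops	= &example_port_ops,
};

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };

	/* Allocates the host, maps the BARs, sets up BMDMA resources and
	 * requests the IRQ with the BMDMA-aware interrupt handler, as in
	 * the ata_generic conversion above. */
	return ata_pci_bmdma_init_one(pdev, ppi, &example_sht, NULL, 0);
}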
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c47373f01f89..06b7e49e039c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -160,6 +160,10 @@ int libata_allow_tpm = 0; | |||
| 160 | module_param_named(allow_tpm, libata_allow_tpm, int, 0444); | 160 | module_param_named(allow_tpm, libata_allow_tpm, int, 0444); |
| 161 | MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); | 161 | MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); |
| 162 | 162 | ||
| 163 | static int atapi_an; | ||
| 164 | module_param(atapi_an, int, 0444); | ||
| 165 | MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)"); | ||
| 166 | |||
| 163 | MODULE_AUTHOR("Jeff Garzik"); | 167 | MODULE_AUTHOR("Jeff Garzik"); |
| 164 | MODULE_DESCRIPTION("Library module for ATA devices"); | 168 | MODULE_DESCRIPTION("Library module for ATA devices"); |
| 165 | MODULE_LICENSE("GPL"); | 169 | MODULE_LICENSE("GPL"); |
| @@ -2122,6 +2126,14 @@ retry: | |||
| 2122 | goto err_out; | 2126 | goto err_out; |
| 2123 | } | 2127 | } |
| 2124 | 2128 | ||
| 2129 | if (dev->horkage & ATA_HORKAGE_DUMP_ID) { | ||
| 2130 | ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, " | ||
| 2131 | "class=%d may_fallback=%d tried_spinup=%d\n", | ||
| 2132 | class, may_fallback, tried_spinup); | ||
| 2133 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, | ||
| 2134 | 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); | ||
| 2135 | } | ||
| 2136 | |||
| 2125 | /* Falling back doesn't make sense if ID data was read | 2137 | /* Falling back doesn't make sense if ID data was read |
| 2126 | * successfully at least once. | 2138 | * successfully at least once. |
| 2127 | */ | 2139 | */ |
| @@ -2510,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev) | |||
| 2510 | * to enable ATAPI AN to discern between PHY status | 2522 | * to enable ATAPI AN to discern between PHY status |
| 2511 | * changed notifications and ATAPI ANs. | 2523 | * changed notifications and ATAPI ANs. |
| 2512 | */ | 2524 | */ |
| 2513 | if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && | 2525 | if (atapi_an && |
| 2526 | (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && | ||
| 2514 | (!sata_pmp_attached(ap) || | 2527 | (!sata_pmp_attached(ap) || |
| 2515 | sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { | 2528 | sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { |
| 2516 | unsigned int err_mask; | 2529 | unsigned int err_mask; |
| @@ -6372,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur, | |||
| 6372 | { "3.0Gbps", .spd_limit = 2 }, | 6385 | { "3.0Gbps", .spd_limit = 2 }, |
| 6373 | { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, | 6386 | { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, |
| 6374 | { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, | 6387 | { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, |
| 6388 | { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, | ||
| 6375 | { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, | 6389 | { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, |
| 6376 | { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, | 6390 | { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, |
| 6377 | { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, | 6391 | { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, |
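Two knobs are added to libata-core above: the atapi_an module parameter, so ATAPI Asynchronous Notification stays off unless libata is loaded with atapi_an=1 (libata.atapi_an=1 on the kernel command line when built in), and the dump_id horkage, selectable with libata.force=dump_id, which dumps the raw IDENTIFY words through print_hex_dump(). A small stand-alone sketch of the same hex-dump call, with a hypothetical wrapper name and buffer, to show how the arguments map:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustrative wrapper - not from the patch. */
static void example_dump_identify(const u16 *id, size_t words)
{
	/* KERN_DEBUG level, offset-style prefix, 16 bytes per row grouped
	 * into 2-byte words, trailing ASCII column - the same layout the
	 * patch uses for the ATA_ID_WORDS (256-word) IDENTIFY buffer. */
	print_hex_dump(KERN_DEBUG, "id: ", DUMP_PREFIX_OFFSET,
		       16, 2, id, words * sizeof(*id), true);
}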
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 19ddf924944f..efa4a18cfb9d 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
| @@ -63,7 +63,6 @@ const struct ata_port_operations ata_sff_port_ops = { | |||
| 63 | .sff_tf_read = ata_sff_tf_read, | 63 | .sff_tf_read = ata_sff_tf_read, |
| 64 | .sff_exec_command = ata_sff_exec_command, | 64 | .sff_exec_command = ata_sff_exec_command, |
| 65 | .sff_data_xfer = ata_sff_data_xfer, | 65 | .sff_data_xfer = ata_sff_data_xfer, |
| 66 | .sff_irq_clear = ata_sff_irq_clear, | ||
| 67 | .sff_drain_fifo = ata_sff_drain_fifo, | 66 | .sff_drain_fifo = ata_sff_drain_fifo, |
| 68 | 67 | ||
| 69 | .lost_interrupt = ata_sff_lost_interrupt, | 68 | .lost_interrupt = ata_sff_lost_interrupt, |
| @@ -395,33 +394,12 @@ void ata_sff_irq_on(struct ata_port *ap) | |||
| 395 | ata_sff_set_devctl(ap, ap->ctl); | 394 | ata_sff_set_devctl(ap, ap->ctl); |
| 396 | ata_wait_idle(ap); | 395 | ata_wait_idle(ap); |
| 397 | 396 | ||
| 398 | ap->ops->sff_irq_clear(ap); | 397 | if (ap->ops->sff_irq_clear) |
| 398 | ap->ops->sff_irq_clear(ap); | ||
| 399 | } | 399 | } |
| 400 | EXPORT_SYMBOL_GPL(ata_sff_irq_on); | 400 | EXPORT_SYMBOL_GPL(ata_sff_irq_on); |
| 401 | 401 | ||
| 402 | /** | 402 | /** |
| 403 | * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
| 404 | * @ap: Port associated with this ATA transaction. | ||
| 405 | * | ||
| 406 | * Clear interrupt and error flags in DMA status register. | ||
| 407 | * | ||
| 408 | * May be used as the irq_clear() entry in ata_port_operations. | ||
| 409 | * | ||
| 410 | * LOCKING: | ||
| 411 | * spin_lock_irqsave(host lock) | ||
| 412 | */ | ||
| 413 | void ata_sff_irq_clear(struct ata_port *ap) | ||
| 414 | { | ||
| 415 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
| 416 | |||
| 417 | if (!mmio) | ||
| 418 | return; | ||
| 419 | |||
| 420 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); | ||
| 421 | } | ||
| 422 | EXPORT_SYMBOL_GPL(ata_sff_irq_clear); | ||
| 423 | |||
| 424 | /** | ||
| 425 | * ata_sff_tf_load - send taskfile registers to host controller | 403 | * ata_sff_tf_load - send taskfile registers to host controller |
| 426 | * @ap: Port to which output is sent | 404 | * @ap: Port to which output is sent |
| 427 | * @tf: ATA taskfile register set | 405 | * @tf: ATA taskfile register set |
| @@ -820,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
| 820 | case ATAPI_PROT_NODATA: | 798 | case ATAPI_PROT_NODATA: |
| 821 | ap->hsm_task_state = HSM_ST_LAST; | 799 | ap->hsm_task_state = HSM_ST_LAST; |
| 822 | break; | 800 | break; |
| 801 | #ifdef CONFIG_ATA_BMDMA | ||
| 823 | case ATAPI_PROT_DMA: | 802 | case ATAPI_PROT_DMA: |
| 824 | ap->hsm_task_state = HSM_ST_LAST; | 803 | ap->hsm_task_state = HSM_ST_LAST; |
| 825 | /* initiate bmdma */ | 804 | /* initiate bmdma */ |
| 826 | ap->ops->bmdma_start(qc); | 805 | ap->ops->bmdma_start(qc); |
| 827 | break; | 806 | break; |
| 807 | #endif /* CONFIG_ATA_BMDMA */ | ||
| 808 | default: | ||
| 809 | BUG(); | ||
| 828 | } | 810 | } |
| 829 | } | 811 | } |
| 830 | 812 | ||
| @@ -1491,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) | |||
| 1491 | } | 1473 | } |
| 1492 | EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); | 1474 | EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); |
| 1493 | 1475 | ||
| 1494 | /** | 1476 | static unsigned int ata_sff_idle_irq(struct ata_port *ap) |
| 1495 | * ata_sff_host_intr - Handle host interrupt for given (port, task) | ||
| 1496 | * @ap: Port on which interrupt arrived (possibly...) | ||
| 1497 | * @qc: Taskfile currently active in engine | ||
| 1498 | * | ||
| 1499 | * Handle host interrupt for given queued command. Currently, | ||
| 1500 | * only DMA interrupts are handled. All other commands are | ||
| 1501 | * handled via polling with interrupts disabled (nIEN bit). | ||
| 1502 | * | ||
| 1503 | * LOCKING: | ||
| 1504 | * spin_lock_irqsave(host lock) | ||
| 1505 | * | ||
| 1506 | * RETURNS: | ||
| 1507 | * One if interrupt was handled, zero if not (shared irq). | ||
| 1508 | */ | ||
| 1509 | unsigned int ata_sff_host_intr(struct ata_port *ap, | ||
| 1510 | struct ata_queued_cmd *qc) | ||
| 1511 | { | 1477 | { |
| 1512 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1478 | ap->stats.idle_irq++; |
| 1513 | u8 status, host_stat = 0; | 1479 | |
| 1514 | bool bmdma_stopped = false; | 1480 | #ifdef ATA_IRQ_TRAP |
| 1481 | if ((ap->stats.idle_irq % 1000) == 0) { | ||
| 1482 | ap->ops->sff_check_status(ap); | ||
| 1483 | if (ap->ops->sff_irq_clear) | ||
| 1484 | ap->ops->sff_irq_clear(ap); | ||
| 1485 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); | ||
| 1486 | return 1; | ||
| 1487 | } | ||
| 1488 | #endif | ||
| 1489 | return 0; /* irq not handled */ | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | static unsigned int __ata_sff_port_intr(struct ata_port *ap, | ||
| 1493 | struct ata_queued_cmd *qc, | ||
| 1494 | bool hsmv_on_idle) | ||
| 1495 | { | ||
| 1496 | u8 status; | ||
| 1515 | 1497 | ||
| 1516 | VPRINTK("ata%u: protocol %d task_state %d\n", | 1498 | VPRINTK("ata%u: protocol %d task_state %d\n", |
| 1517 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); | 1499 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); |
| @@ -1528,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, | |||
| 1528 | * need to check ata_is_atapi(qc->tf.protocol) again. | 1510 | * need to check ata_is_atapi(qc->tf.protocol) again. |
| 1529 | */ | 1511 | */ |
| 1530 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | 1512 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
| 1531 | goto idle_irq; | 1513 | return ata_sff_idle_irq(ap); |
| 1532 | break; | ||
| 1533 | case HSM_ST_LAST: | ||
| 1534 | if (qc->tf.protocol == ATA_PROT_DMA || | ||
| 1535 | qc->tf.protocol == ATAPI_PROT_DMA) { | ||
| 1536 | /* check status of DMA engine */ | ||
| 1537 | host_stat = ap->ops->bmdma_status(ap); | ||
| 1538 | VPRINTK("ata%u: host_stat 0x%X\n", | ||
| 1539 | ap->print_id, host_stat); | ||
| 1540 | |||
| 1541 | /* if it's not our irq... */ | ||
| 1542 | if (!(host_stat & ATA_DMA_INTR)) | ||
| 1543 | goto idle_irq; | ||
| 1544 | |||
| 1545 | /* before we do anything else, clear DMA-Start bit */ | ||
| 1546 | ap->ops->bmdma_stop(qc); | ||
| 1547 | bmdma_stopped = true; | ||
| 1548 | |||
| 1549 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
| 1550 | /* error when transfering data to/from memory */ | ||
| 1551 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
| 1552 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 1553 | } | ||
| 1554 | } | ||
| 1555 | break; | 1514 | break; |
| 1556 | case HSM_ST: | 1515 | case HSM_ST: |
| 1516 | case HSM_ST_LAST: | ||
| 1557 | break; | 1517 | break; |
| 1558 | default: | 1518 | default: |
| 1559 | goto idle_irq; | 1519 | return ata_sff_idle_irq(ap); |
| 1560 | } | 1520 | } |
| 1561 | 1521 | ||
| 1562 | |||
| 1563 | /* check main status, clearing INTRQ if needed */ | 1522 | /* check main status, clearing INTRQ if needed */ |
| 1564 | status = ata_sff_irq_status(ap); | 1523 | status = ata_sff_irq_status(ap); |
| 1565 | if (status & ATA_BUSY) { | 1524 | if (status & ATA_BUSY) { |
| 1566 | if (bmdma_stopped) { | 1525 | if (hsmv_on_idle) { |
| 1567 | /* BMDMA engine is already stopped, we're screwed */ | 1526 | /* BMDMA engine is already stopped, we're screwed */ |
| 1568 | qc->err_mask |= AC_ERR_HSM; | 1527 | qc->err_mask |= AC_ERR_HSM; |
| 1569 | ap->hsm_task_state = HSM_ST_ERR; | 1528 | ap->hsm_task_state = HSM_ST_ERR; |
| 1570 | } else | 1529 | } else |
| 1571 | goto idle_irq; | 1530 | return ata_sff_idle_irq(ap); |
| 1572 | } | 1531 | } |
| 1573 | 1532 | ||
| 1574 | /* clear irq events */ | 1533 | /* clear irq events */ |
| 1575 | ap->ops->sff_irq_clear(ap); | 1534 | if (ap->ops->sff_irq_clear) |
| 1535 | ap->ops->sff_irq_clear(ap); | ||
| 1576 | 1536 | ||
| 1577 | ata_sff_hsm_move(ap, qc, status, 0); | 1537 | ata_sff_hsm_move(ap, qc, status, 0); |
| 1578 | 1538 | ||
| 1579 | if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || | ||
| 1580 | qc->tf.protocol == ATAPI_PROT_DMA)) | ||
| 1581 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | ||
| 1582 | |||
| 1583 | return 1; /* irq handled */ | 1539 | return 1; /* irq handled */ |
| 1584 | |||
| 1585 | idle_irq: | ||
| 1586 | ap->stats.idle_irq++; | ||
| 1587 | |||
| 1588 | #ifdef ATA_IRQ_TRAP | ||
| 1589 | if ((ap->stats.idle_irq % 1000) == 0) { | ||
| 1590 | ap->ops->sff_check_status(ap); | ||
| 1591 | ap->ops->sff_irq_clear(ap); | ||
| 1592 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); | ||
| 1593 | return 1; | ||
| 1594 | } | ||
| 1595 | #endif | ||
| 1596 | return 0; /* irq not handled */ | ||
| 1597 | } | 1540 | } |
| 1598 | EXPORT_SYMBOL_GPL(ata_sff_host_intr); | ||
| 1599 | 1541 | ||
| 1600 | /** | 1542 | /** |
| 1601 | * ata_sff_interrupt - Default ATA host interrupt handler | 1543 | * ata_sff_port_intr - Handle SFF port interrupt |
| 1602 | * @irq: irq line (unused) | 1544 | * @ap: Port on which interrupt arrived (possibly...) |
| 1603 | * @dev_instance: pointer to our ata_host information structure | 1545 | * @qc: Taskfile currently active in engine |
| 1604 | * | 1546 | * |
| 1605 | * Default interrupt handler for PCI IDE devices. Calls | 1547 | * Handle port interrupt for given queued command. |
| 1606 | * ata_sff_host_intr() for each port that is not disabled. | ||
| 1607 | * | 1548 | * |
| 1608 | * LOCKING: | 1549 | * LOCKING: |
| 1609 | * Obtains host lock during operation. | 1550 | * spin_lock_irqsave(host lock) |
| 1610 | * | 1551 | * |
| 1611 | * RETURNS: | 1552 | * RETURNS: |
| 1612 | * IRQ_NONE or IRQ_HANDLED. | 1553 | * One if interrupt was handled, zero if not (shared irq). |
| 1613 | */ | 1554 | */ |
| 1614 | irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | 1555 | unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) |
| 1556 | { | ||
| 1557 | return __ata_sff_port_intr(ap, qc, false); | ||
| 1558 | } | ||
| 1559 | EXPORT_SYMBOL_GPL(ata_sff_port_intr); | ||
| 1560 | |||
| 1561 | static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, | ||
| 1562 | unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) | ||
| 1615 | { | 1563 | { |
| 1616 | struct ata_host *host = dev_instance; | 1564 | struct ata_host *host = dev_instance; |
| 1617 | bool retried = false; | 1565 | bool retried = false; |
| @@ -1631,7 +1579,7 @@ retry: | |||
| 1631 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1579 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
| 1632 | if (qc) { | 1580 | if (qc) { |
| 1633 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) | 1581 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) |
| 1634 | handled |= ata_sff_host_intr(ap, qc); | 1582 | handled |= port_intr(ap, qc); |
| 1635 | else | 1583 | else |
| 1636 | polling |= 1 << i; | 1584 | polling |= 1 << i; |
| 1637 | } else | 1585 | } else |
| @@ -1658,7 +1606,8 @@ retry: | |||
| 1658 | 1606 | ||
| 1659 | if (idle & (1 << i)) { | 1607 | if (idle & (1 << i)) { |
| 1660 | ap->ops->sff_check_status(ap); | 1608 | ap->ops->sff_check_status(ap); |
| 1661 | ap->ops->sff_irq_clear(ap); | 1609 | if (ap->ops->sff_irq_clear) |
| 1610 | ap->ops->sff_irq_clear(ap); | ||
| 1662 | } else { | 1611 | } else { |
| 1663 | /* clear INTRQ and check if BUSY cleared */ | 1612 | /* clear INTRQ and check if BUSY cleared */ |
| 1664 | if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) | 1613 | if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) |
| @@ -1680,6 +1629,25 @@ retry: | |||
| 1680 | 1629 | ||
| 1681 | return IRQ_RETVAL(handled); | 1630 | return IRQ_RETVAL(handled); |
| 1682 | } | 1631 | } |
| 1632 | |||
| 1633 | /** | ||
| 1634 | * ata_sff_interrupt - Default SFF ATA host interrupt handler | ||
| 1635 | * @irq: irq line (unused) | ||
| 1636 | * @dev_instance: pointer to our ata_host information structure | ||
| 1637 | * | ||
| 1638 | * Default interrupt handler for PCI IDE devices. Calls | ||
| 1639 | * ata_sff_port_intr() for each port that is not disabled. | ||
| 1640 | * | ||
| 1641 | * LOCKING: | ||
| 1642 | * Obtains host lock during operation. | ||
| 1643 | * | ||
| 1644 | * RETURNS: | ||
| 1645 | * IRQ_NONE or IRQ_HANDLED. | ||
| 1646 | */ | ||
| 1647 | irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | ||
| 1648 | { | ||
| 1649 | return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); | ||
| 1650 | } | ||
| 1683 | EXPORT_SYMBOL_GPL(ata_sff_interrupt); | 1651 | EXPORT_SYMBOL_GPL(ata_sff_interrupt); |
| 1684 | 1652 | ||
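
For reference, the PIO-only path that keeps using ata_sff_interrupt() looks like the sketch below; it is illustrative only and not part of this patch. The pata_foo_* names and the port flags are hypothetical placeholders, and the includes shown here are assumed by the later sketches as well.

    #include <linux/pci.h>
    #include <linux/libata.h>
    #include <scsi/scsi_host.h>

    /* Hypothetical PIO-only PCI driver: ata_pci_sff_init_one() registers
     * ata_sff_interrupt() as the shared IRQ handler behind the scenes. */
    static int pata_foo_init_one(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
    {
            static const struct ata_port_info info = {
                    .flags          = ATA_FLAG_SLAVE_POSS,
                    .pio_mask       = ATA_PIO4,
                    .port_ops       = &pata_foo_port_ops,  /* inherits ata_sff_port_ops */
            };
            const struct ata_port_info *ppi[] = { &info, NULL };

            /* no MWDMA/UDMA masks, so the plain SFF init path is enough */
            return ata_pci_sff_init_one(pdev, ppi, &pata_foo_sht, NULL, 0);
    }

DMA-capable drivers switch to the BMDMA variants added further down in this patch.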
| 1685 | /** | 1653 | /** |
| @@ -1717,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap) | |||
| 1717 | status); | 1685 | status); |
| 1718 | /* Run the host interrupt logic as if the interrupt had not been | 1686 | /* Run the host interrupt logic as if the interrupt had not been |
| 1719 | lost */ | 1687 | lost */ |
| 1720 | ata_sff_host_intr(ap, qc); | 1688 | ata_sff_port_intr(ap, qc); |
| 1721 | } | 1689 | } |
| 1722 | EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); | 1690 | EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); |
| 1723 | 1691 | ||
| @@ -1744,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap) | |||
| 1744 | */ | 1712 | */ |
| 1745 | ap->ops->sff_check_status(ap); | 1713 | ap->ops->sff_check_status(ap); |
| 1746 | 1714 | ||
| 1747 | ap->ops->sff_irq_clear(ap); | 1715 | if (ap->ops->sff_irq_clear) |
| 1716 | ap->ops->sff_irq_clear(ap); | ||
| 1748 | } | 1717 | } |
| 1749 | EXPORT_SYMBOL_GPL(ata_sff_freeze); | 1718 | EXPORT_SYMBOL_GPL(ata_sff_freeze); |
| 1750 | 1719 | ||
| @@ -1761,7 +1730,8 @@ void ata_sff_thaw(struct ata_port *ap) | |||
| 1761 | { | 1730 | { |
| 1762 | /* clear & re-enable interrupts */ | 1731 | /* clear & re-enable interrupts */ |
| 1763 | ap->ops->sff_check_status(ap); | 1732 | ap->ops->sff_check_status(ap); |
| 1764 | ap->ops->sff_irq_clear(ap); | 1733 | if (ap->ops->sff_irq_clear) |
| 1734 | ap->ops->sff_irq_clear(ap); | ||
| 1765 | ata_sff_irq_on(ap); | 1735 | ata_sff_irq_on(ap); |
| 1766 | } | 1736 | } |
| 1767 | EXPORT_SYMBOL_GPL(ata_sff_thaw); | 1737 | EXPORT_SYMBOL_GPL(ata_sff_thaw); |
| @@ -2349,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host) | |||
| 2349 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); | 2319 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); |
| 2350 | 2320 | ||
| 2351 | /** | 2321 | /** |
| 2352 | * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host | 2322 | * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host |
| 2353 | * @pdev: target PCI device | 2323 | * @pdev: target PCI device |
| 2354 | * @ppi: array of port_info, must be enough for two ports | 2324 | * @ppi: array of port_info, must be enough for two ports |
| 2355 | * @r_host: out argument for the initialized ATA host | 2325 | * @r_host: out argument for the initialized ATA host |
| 2356 | * | 2326 | * |
| 2357 | * Helper to allocate ATA host for @pdev, acquire all native PCI | 2327 | * Helper to allocate PIO-only SFF ATA host for @pdev, acquire |
| 2358 | * resources and initialize it accordingly in one go. | 2328 | * all PCI resources and initialize it accordingly in one go. |
| 2359 | * | 2329 | * |
| 2360 | * LOCKING: | 2330 | * LOCKING: |
| 2361 | * Inherited from calling layer (may sleep). | 2331 | * Inherited from calling layer (may sleep). |
| @@ -2385,9 +2355,6 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, | |||
| 2385 | if (rc) | 2355 | if (rc) |
| 2386 | goto err_out; | 2356 | goto err_out; |
| 2387 | 2357 | ||
| 2388 | /* init DMA related stuff */ | ||
| 2389 | ata_pci_bmdma_init(host); | ||
| 2390 | |||
| 2391 | devres_remove_group(&pdev->dev, NULL); | 2358 | devres_remove_group(&pdev->dev, NULL); |
| 2392 | *r_host = host; | 2359 | *r_host = host; |
| 2393 | return 0; | 2360 | return 0; |
| @@ -2492,8 +2459,21 @@ out: | |||
| 2492 | } | 2459 | } |
| 2493 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); | 2460 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); |
| 2494 | 2461 | ||
| 2462 | static const struct ata_port_info *ata_sff_find_valid_pi( | ||
| 2463 | const struct ata_port_info * const *ppi) | ||
| 2464 | { | ||
| 2465 | int i; | ||
| 2466 | |||
| 2467 | /* look up the first valid port_info */ | ||
| 2468 | for (i = 0; i < 2 && ppi[i]; i++) | ||
| 2469 | if (ppi[i]->port_ops != &ata_dummy_port_ops) | ||
| 2470 | return ppi[i]; | ||
| 2471 | |||
| 2472 | return NULL; | ||
| 2473 | } | ||
| 2474 | |||
| 2495 | /** | 2475 | /** |
| 2496 | * ata_pci_sff_init_one - Initialize/register PCI IDE host controller | 2476 | * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller |
| 2497 | * @pdev: Controller to be initialized | 2477 | * @pdev: Controller to be initialized |
| 2498 | * @ppi: array of port_info, must be enough for two ports | 2478 | * @ppi: array of port_info, must be enough for two ports |
| 2499 | * @sht: scsi_host_template to use when registering the host | 2479 | * @sht: scsi_host_template to use when registering the host |
| @@ -2502,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); | |||
| 2502 | * | 2482 | * |
| 2503 | * This is a helper function which can be called from a driver's | 2483 | * This is a helper function which can be called from a driver's |
| 2504 | * xxx_init_one() probe function if the hardware uses traditional | 2484 | * xxx_init_one() probe function if the hardware uses traditional |
| 2505 | * IDE taskfile registers. | 2485 | * IDE taskfile registers and is PIO only. |
| 2506 | * | ||
| 2507 | * This function calls pci_enable_device(), reserves its register | ||
| 2508 | * regions, sets the dma mask, enables bus master mode, and calls | ||
| 2509 | * ata_device_add() | ||
| 2510 | * | 2486 | * |
| 2511 | * ASSUMPTION: | 2487 | * ASSUMPTION: |
| 2512 | * Nobody makes a single channel controller that appears solely as | 2488 | * Nobody makes a single channel controller that appears solely as |
| @@ -2523,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, | |||
| 2523 | struct scsi_host_template *sht, void *host_priv, int hflag) | 2499 | struct scsi_host_template *sht, void *host_priv, int hflag) |
| 2524 | { | 2500 | { |
| 2525 | struct device *dev = &pdev->dev; | 2501 | struct device *dev = &pdev->dev; |
| 2526 | const struct ata_port_info *pi = NULL; | 2502 | const struct ata_port_info *pi; |
| 2527 | struct ata_host *host = NULL; | 2503 | struct ata_host *host = NULL; |
| 2528 | int i, rc; | 2504 | int rc; |
| 2529 | 2505 | ||
| 2530 | DPRINTK("ENTER\n"); | 2506 | DPRINTK("ENTER\n"); |
| 2531 | 2507 | ||
| 2532 | /* look up the first valid port_info */ | 2508 | pi = ata_sff_find_valid_pi(ppi); |
| 2533 | for (i = 0; i < 2 && ppi[i]; i++) { | ||
| 2534 | if (ppi[i]->port_ops != &ata_dummy_port_ops) { | ||
| 2535 | pi = ppi[i]; | ||
| 2536 | break; | ||
| 2537 | } | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | if (!pi) { | 2509 | if (!pi) { |
| 2541 | dev_printk(KERN_ERR, &pdev->dev, | 2510 | dev_printk(KERN_ERR, &pdev->dev, |
| 2542 | "no valid port_info specified\n"); | 2511 | "no valid port_info specified\n"); |
| @@ -2557,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, | |||
| 2557 | host->private_data = host_priv; | 2526 | host->private_data = host_priv; |
| 2558 | host->flags |= hflag; | 2527 | host->flags |= hflag; |
| 2559 | 2528 | ||
| 2560 | pci_set_master(pdev); | ||
| 2561 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); | 2529 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); |
| 2562 | out: | 2530 | out: |
| 2563 | if (rc == 0) | 2531 | if (rc == 0) |
| @@ -2571,6 +2539,12 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); | |||
| 2571 | 2539 | ||
| 2572 | #endif /* CONFIG_PCI */ | 2540 | #endif /* CONFIG_PCI */ |
| 2573 | 2541 | ||
| 2542 | /* | ||
| 2543 | * BMDMA support | ||
| 2544 | */ | ||
| 2545 | |||
| 2546 | #ifdef CONFIG_ATA_BMDMA | ||
| 2547 | |||
| 2574 | const struct ata_port_operations ata_bmdma_port_ops = { | 2548 | const struct ata_port_operations ata_bmdma_port_ops = { |
| 2575 | .inherits = &ata_sff_port_ops, | 2549 | .inherits = &ata_sff_port_ops, |
| 2576 | 2550 | ||
| @@ -2580,6 +2554,7 @@ const struct ata_port_operations ata_bmdma_port_ops = { | |||
| 2580 | .qc_prep = ata_bmdma_qc_prep, | 2554 | .qc_prep = ata_bmdma_qc_prep, |
| 2581 | .qc_issue = ata_bmdma_qc_issue, | 2555 | .qc_issue = ata_bmdma_qc_issue, |
| 2582 | 2556 | ||
| 2557 | .sff_irq_clear = ata_bmdma_irq_clear, | ||
| 2583 | .bmdma_setup = ata_bmdma_setup, | 2558 | .bmdma_setup = ata_bmdma_setup, |
| 2584 | .bmdma_start = ata_bmdma_start, | 2559 | .bmdma_start = ata_bmdma_start, |
| 2585 | .bmdma_stop = ata_bmdma_stop, | 2560 | .bmdma_stop = ata_bmdma_stop, |
| @@ -2804,6 +2779,75 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | |||
| 2804 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); | 2779 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); |
| 2805 | 2780 | ||
| 2806 | /** | 2781 | /** |
| 2782 | * ata_bmdma_port_intr - Handle BMDMA port interrupt | ||
| 2783 | * @ap: Port on which interrupt arrived (possibly...) | ||
| 2784 | * @qc: Taskfile currently active in engine | ||
| 2785 | * | ||
| 2786 | * Handle port interrupt for given queued command. | ||
| 2787 | * | ||
| 2788 | * LOCKING: | ||
| 2789 | * spin_lock_irqsave(host lock) | ||
| 2790 | * | ||
| 2791 | * RETURNS: | ||
| 2792 | * One if interrupt was handled, zero if not (shared irq). | ||
| 2793 | */ | ||
| 2794 | unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
| 2795 | { | ||
| 2796 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
| 2797 | u8 host_stat = 0; | ||
| 2798 | bool bmdma_stopped = false; | ||
| 2799 | unsigned int handled; | ||
| 2800 | |||
| 2801 | if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { | ||
| 2802 | /* check status of DMA engine */ | ||
| 2803 | host_stat = ap->ops->bmdma_status(ap); | ||
| 2804 | VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); | ||
| 2805 | |||
| 2806 | /* if it's not our irq... */ | ||
| 2807 | if (!(host_stat & ATA_DMA_INTR)) | ||
| 2808 | return ata_sff_idle_irq(ap); | ||
| 2809 | |||
| 2810 | /* before we do anything else, clear DMA-Start bit */ | ||
| 2811 | ap->ops->bmdma_stop(qc); | ||
| 2812 | bmdma_stopped = true; | ||
| 2813 | |||
| 2814 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
| 2815 | /* error when transferring data to/from memory */ | ||
| 2816 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
| 2817 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 2818 | } | ||
| 2819 | } | ||
| 2820 | |||
| 2821 | handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); | ||
| 2822 | |||
| 2823 | if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) | ||
| 2824 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | ||
| 2825 | |||
| 2826 | return handled; | ||
| 2827 | } | ||
| 2828 | EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); | ||
| 2829 | |||
| 2830 | /** | ||
| 2831 | * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler | ||
| 2832 | * @irq: irq line (unused) | ||
| 2833 | * @dev_instance: pointer to our ata_host information structure | ||
| 2834 | * | ||
| 2835 | * Default interrupt handler for PCI IDE devices. Calls | ||
| 2836 | * ata_bmdma_port_intr() for each port that is not disabled. | ||
| 2837 | * | ||
| 2838 | * LOCKING: | ||
| 2839 | * Obtains host lock during operation. | ||
| 2840 | * | ||
| 2841 | * RETURNS: | ||
| 2842 | * IRQ_NONE or IRQ_HANDLED. | ||
| 2843 | */ | ||
| 2844 | irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) | ||
| 2845 | { | ||
| 2846 | return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); | ||
| 2847 | } | ||
| 2848 | EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); | ||
| 2849 | |||
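
A driver that multiplexes several ports behind its own top-level handler can still hand the per-port work to ata_bmdma_port_intr(), much like the sata_mv and sata_nv hunks further below. A minimal sketch, with foo_irq_pending() standing in for a hypothetical chip-specific pending check:

    /* Hypothetical top-level handler; the host lock is held around
     * ata_bmdma_port_intr(), as its LOCKING note above requires. */
    static irqreturn_t foo_interrupt(int irq, void *dev_instance)
    {
            struct ata_host *host = dev_instance;
            unsigned int handled = 0;
            unsigned long flags;
            int i;

            spin_lock_irqsave(&host->lock, flags);
            for (i = 0; i < host->n_ports; i++) {
                    struct ata_port *ap = host->ports[i];
                    struct ata_queued_cmd *qc;

                    if (!foo_irq_pending(ap))       /* hypothetical helper */
                            continue;

                    qc = ata_qc_from_tag(ap, ap->link.active_tag);
                    if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
                            handled |= ata_bmdma_port_intr(ap, qc);
            }
            spin_unlock_irqrestore(&host->lock, flags);

            return IRQ_RETVAL(handled);
    }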
| 2850 | /** | ||
| 2807 | * ata_bmdma_error_handler - Stock error handler for BMDMA controller | 2851 | * ata_bmdma_error_handler - Stock error handler for BMDMA controller |
| 2808 | * @ap: port to handle error for | 2852 | * @ap: port to handle error for |
| 2809 | * | 2853 | * |
| @@ -2848,7 +2892,8 @@ void ata_bmdma_error_handler(struct ata_port *ap) | |||
| 2848 | /* if we're gonna thaw, make sure IRQ is clear */ | 2892 | /* if we're gonna thaw, make sure IRQ is clear */ |
| 2849 | if (thaw) { | 2893 | if (thaw) { |
| 2850 | ap->ops->sff_check_status(ap); | 2894 | ap->ops->sff_check_status(ap); |
| 2851 | ap->ops->sff_irq_clear(ap); | 2895 | if (ap->ops->sff_irq_clear) |
| 2896 | ap->ops->sff_irq_clear(ap); | ||
| 2852 | } | 2897 | } |
| 2853 | } | 2898 | } |
| 2854 | 2899 | ||
| @@ -2882,6 +2927,28 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | |||
| 2882 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); | 2927 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); |
| 2883 | 2928 | ||
| 2884 | /** | 2929 | /** |
| 2930 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
| 2931 | * @ap: Port associated with this ATA transaction. | ||
| 2932 | * | ||
| 2933 | * Clear interrupt and error flags in DMA status register. | ||
| 2934 | * | ||
| 2935 | * May be used as the irq_clear() entry in ata_port_operations. | ||
| 2936 | * | ||
| 2937 | * LOCKING: | ||
| 2938 | * spin_lock_irqsave(host lock) | ||
| 2939 | */ | ||
| 2940 | void ata_bmdma_irq_clear(struct ata_port *ap) | ||
| 2941 | { | ||
| 2942 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
| 2943 | |||
| 2944 | if (!mmio) | ||
| 2945 | return; | ||
| 2946 | |||
| 2947 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); | ||
| 2948 | } | ||
| 2949 | EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); | ||
| 2950 | |||
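
A controller with an additional chip-level interrupt register could wrap this helper in its own ->sff_irq_clear hook; the sketch below is purely illustrative (FOO_IRQ_STAT and the BAR index are hypothetical):

    /* Hypothetical wrapper: ack the chip-level status first, then let
     * ata_bmdma_irq_clear() clear the BMDMA status register. */
    static void foo_irq_clear(struct ata_port *ap)
    {
            void __iomem *chip = ap->host->iomap[0];        /* hypothetical BAR */

            iowrite32(~0U, chip + FOO_IRQ_STAT);
            ata_bmdma_irq_clear(ap);
    }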
| 2951 | /** | ||
| 2885 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | 2952 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction |
| 2886 | * @qc: Info associated with this ATA transaction. | 2953 | * @qc: Info associated with this ATA transaction. |
| 2887 | * | 2954 | * |
| @@ -3137,7 +3204,100 @@ void ata_pci_bmdma_init(struct ata_host *host) | |||
| 3137 | } | 3204 | } |
| 3138 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); | 3205 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); |
| 3139 | 3206 | ||
| 3207 | /** | ||
| 3208 | * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host | ||
| 3209 | * @pdev: target PCI device | ||
| 3210 | * @ppi: array of port_info, must be enough for two ports | ||
| 3211 | * @r_host: out argument for the initialized ATA host | ||
| 3212 | * | ||
| 3213 | * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI | ||
| 3214 | * resources and initialize it accordingly in one go. | ||
| 3215 | * | ||
| 3216 | * LOCKING: | ||
| 3217 | * Inherited from calling layer (may sleep). | ||
| 3218 | * | ||
| 3219 | * RETURNS: | ||
| 3220 | * 0 on success, -errno otherwise. | ||
| 3221 | */ | ||
| 3222 | int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, | ||
| 3223 | const struct ata_port_info * const * ppi, | ||
| 3224 | struct ata_host **r_host) | ||
| 3225 | { | ||
| 3226 | int rc; | ||
| 3227 | |||
| 3228 | rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); | ||
| 3229 | if (rc) | ||
| 3230 | return rc; | ||
| 3231 | |||
| 3232 | ata_pci_bmdma_init(*r_host); | ||
| 3233 | return 0; | ||
| 3234 | } | ||
| 3235 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); | ||
| 3236 | |||
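
Drivers that need to adjust the host between allocation and activation use this prepare/activate pair instead of the all-in-one helper; the pata_rdc conversion below follows exactly this shape. A sketch with hypothetical foo_* names:

    static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
            struct ata_host *host;
            int rc;

            rc = pcim_enable_device(pdev);
            if (rc)
                    return rc;

            rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
            if (rc)
                    return rc;

            /* tweak the host before it goes live */
            host->flags |= ATA_HOST_PARALLEL_SCAN;

            pci_set_master(pdev);
            return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &foo_sht);
    }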
| 3237 | /** | ||
| 3238 | * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller | ||
| 3239 | * @pdev: Controller to be initialized | ||
| 3240 | * @ppi: array of port_info, must be enough for two ports | ||
| 3241 | * @sht: scsi_host_template to use when registering the host | ||
| 3242 | * @host_priv: host private_data | ||
| 3243 | * @hflags: host flags | ||
| 3244 | * | ||
| 3245 | * This function is similar to ata_pci_sff_init_one() but also | ||
| 3246 | * takes care of BMDMA initialization. | ||
| 3247 | * | ||
| 3248 | * LOCKING: | ||
| 3249 | * Inherited from PCI layer (may sleep). | ||
| 3250 | * | ||
| 3251 | * RETURNS: | ||
| 3252 | * Zero on success, negative errno-based value on error. | ||
| 3253 | */ | ||
| 3254 | int ata_pci_bmdma_init_one(struct pci_dev *pdev, | ||
| 3255 | const struct ata_port_info * const * ppi, | ||
| 3256 | struct scsi_host_template *sht, void *host_priv, | ||
| 3257 | int hflags) | ||
| 3258 | { | ||
| 3259 | struct device *dev = &pdev->dev; | ||
| 3260 | const struct ata_port_info *pi; | ||
| 3261 | struct ata_host *host = NULL; | ||
| 3262 | int rc; | ||
| 3263 | |||
| 3264 | DPRINTK("ENTER\n"); | ||
| 3265 | |||
| 3266 | pi = ata_sff_find_valid_pi(ppi); | ||
| 3267 | if (!pi) { | ||
| 3268 | dev_printk(KERN_ERR, &pdev->dev, | ||
| 3269 | "no valid port_info specified\n"); | ||
| 3270 | return -EINVAL; | ||
| 3271 | } | ||
| 3272 | |||
| 3273 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) | ||
| 3274 | return -ENOMEM; | ||
| 3275 | |||
| 3276 | rc = pcim_enable_device(pdev); | ||
| 3277 | if (rc) | ||
| 3278 | goto out; | ||
| 3279 | |||
| 3280 | /* prepare and activate BMDMA host */ | ||
| 3281 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); | ||
| 3282 | if (rc) | ||
| 3283 | goto out; | ||
| 3284 | host->private_data = host_priv; | ||
| 3285 | host->flags |= hflags; | ||
| 3286 | |||
| 3287 | pci_set_master(pdev); | ||
| 3288 | rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); | ||
| 3289 | out: | ||
| 3290 | if (rc == 0) | ||
| 3291 | devres_remove_group(&pdev->dev, NULL); | ||
| 3292 | else | ||
| 3293 | devres_release_group(&pdev->dev, NULL); | ||
| 3294 | |||
| 3295 | return rc; | ||
| 3296 | } | ||
| 3297 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); | ||
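
For the common case the whole probe collapses to a single call, which is what most of the driver conversions below do. A sketch with hypothetical foo_* names:

    static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            static const struct ata_port_info info = {
                    .flags          = ATA_FLAG_SLAVE_POSS,
                    .pio_mask       = ATA_PIO4,
                    .mwdma_mask     = ATA_MWDMA2,
                    .udma_mask      = ATA_UDMA5,
                    .port_ops       = &foo_port_ops,        /* inherits ata_bmdma_port_ops */
            };
            const struct ata_port_info *ppi[] = { &info, NULL };

            /* enables the device, maps BARs, wires up BMDMA and
             * registers ata_bmdma_interrupt() in one go */
            return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
    }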
| 3298 | |||
| 3140 | #endif /* CONFIG_PCI */ | 3299 | #endif /* CONFIG_PCI */ |
| 3300 | #endif /* CONFIG_ATA_BMDMA */ | ||
| 3141 | 3301 | ||
| 3142 | /** | 3302 | /** |
| 3143 | * ata_sff_port_init - Initialize SFF/BMDMA ATA port | 3303 | * ata_sff_port_init - Initialize SFF/BMDMA ATA port |
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index 066b9f301ed5..c8d47034d5e9 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c | |||
| @@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 260 | return rc; | 260 | return rc; |
| 261 | pcim_pin_device(pdev); | 261 | pcim_pin_device(pdev); |
| 262 | } | 262 | } |
| 263 | return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0); | 263 | return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0); |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static const struct pci_device_id pacpi_pci_tbl[] = { | 266 | static const struct pci_device_id pacpi_pci_tbl[] = { |
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index f306e10c748d..794ec6e3275d 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c | |||
| @@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 583 | ppi[0] = &info_20_udma; | 583 | ppi[0] = &info_20_udma; |
| 584 | } | 584 | } |
| 585 | 585 | ||
| 586 | return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); | 586 | if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask) |
| 587 | return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); | ||
| 588 | else | ||
| 589 | return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0); | ||
| 587 | } | 590 | } |
| 588 | 591 | ||
| 589 | #ifdef CONFIG_PM | 592 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index d95eca9c547e..620a07cabe31 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c | |||
| @@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 574 | } | 574 | } |
| 575 | 575 | ||
| 576 | /* And fire it up */ | 576 | /* And fire it up */ |
| 577 | return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0); | 577 | return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0); |
| 578 | } | 578 | } |
| 579 | 579 | ||
| 580 | #ifdef CONFIG_PM | 580 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index 4d066d6c30fa..ba43f0f8c880 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c | |||
| @@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 421 | 421 | ||
| 422 | BUG_ON(ppi[0] == NULL); | 422 | BUG_ON(ppi[0] == NULL); |
| 423 | 423 | ||
| 424 | return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0); | 424 | return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); |
| 425 | } | 425 | } |
| 426 | 426 | ||
| 427 | static const struct pci_device_id artop_pci_tbl[] = { | 427 | static const struct pci_device_id artop_pci_tbl[] = { |
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 44d88b380ddd..43755616dc5a 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c | |||
| @@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 246 | if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) | 246 | if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) |
| 247 | ppi[i] = &ata_dummy_port_info; | 247 | ppi[i] = &ata_dummy_port_info; |
| 248 | 248 | ||
| 249 | return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL, | 249 | return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, |
| 250 | ATA_HOST_PARALLEL_SCAN); | 250 | ATA_HOST_PARALLEL_SCAN); |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | static const struct pci_device_id atiixp[] = { | 253 | static const struct pci_device_id atiixp[] = { |
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index bb6e0746e07d..95295935dd95 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c | |||
| @@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev, | |||
| 525 | 525 | ||
| 526 | pci_set_master(pdev); | 526 | pci_set_master(pdev); |
| 527 | 527 | ||
| 528 | rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 528 | rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 529 | IRQF_SHARED, &atp867x_sht); | 529 | IRQF_SHARED, &atp867x_sht); |
| 530 | if (rc) | 530 | if (rc) |
| 531 | dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); | 531 | dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); |
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 6422cfd13d0d..9cae65de750e 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c | |||
| @@ -1214,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, | |||
| 1214 | * bfin_irq_clear - Clear ATAPI interrupt. | 1214 | * bfin_irq_clear - Clear ATAPI interrupt. |
| 1215 | * @ap: Port associated with this ATA transaction. | 1215 | * @ap: Port associated with this ATA transaction. |
| 1216 | * | 1216 | * |
| 1217 | * Note: Original code is ata_sff_irq_clear(). | 1217 | * Note: Original code is ata_bmdma_irq_clear(). |
| 1218 | */ | 1218 | */ |
| 1219 | 1219 | ||
| 1220 | static void bfin_irq_clear(struct ata_port *ap) | 1220 | static void bfin_irq_clear(struct ata_port *ap) |
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 4c81a71b8877..9f5da1c7454b 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c | |||
| @@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 367 | pci_write_config_byte(pdev, UDIDETCR0, 0xF0); | 367 | pci_write_config_byte(pdev, UDIDETCR0, 0xF0); |
| 368 | #endif | 368 | #endif |
| 369 | 369 | ||
| 370 | return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); | 370 | return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); |
| 371 | } | 371 | } |
| 372 | 372 | ||
| 373 | #ifdef CONFIG_PM | 373 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 17c5f346ff01..030952f1f97c 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c | |||
| @@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi | |||
| 221 | continue; | 221 | continue; |
| 222 | 222 | ||
| 223 | rc = devm_request_irq(&pdev->dev, irq[ap->port_no], | 223 | rc = devm_request_irq(&pdev->dev, irq[ap->port_no], |
| 224 | ata_sff_interrupt, 0, DRV_NAME, host); | 224 | ata_bmdma_interrupt, 0, DRV_NAME, host); |
| 225 | if (rc) | 225 | if (rc) |
| 226 | return rc; | 226 | return rc; |
| 227 | 227 | ||
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index e809a4233a81..f792330f0d8e 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c | |||
| @@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 324 | ppi[1] = &info_palmax_secondary; | 324 | ppi[1] = &info_palmax_secondary; |
| 325 | 325 | ||
| 326 | /* Now kick off ATA set up */ | 326 | /* Now kick off ATA set up */ |
| 327 | return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0); | 327 | return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | #ifdef CONFIG_PM | 330 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index a02e6459fdcc..03a93186aa19 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c | |||
| @@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 198 | rdmsr(ATAC_CH0D1_PIO, timings, dummy); | 198 | rdmsr(ATAC_CH0D1_PIO, timings, dummy); |
| 199 | if (CS5535_BAD_PIO(timings)) | 199 | if (CS5535_BAD_PIO(timings)) |
| 200 | wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); | 200 | wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); |
| 201 | return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0); | 201 | return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0); |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | static const struct pci_device_id cs5535[] = { | 204 | static const struct pci_device_id cs5535[] = { |
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 914ae3506ff5..21ee23f89e88 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c | |||
| @@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 260 | return -ENODEV; | 260 | return -ENODEV; |
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0); | 263 | return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0); |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static const struct pci_device_id cs5536[] = { | 266 | static const struct pci_device_id cs5536[] = { |
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 0fcc096b8dac..6d915b063d93 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c | |||
| @@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i | |||
| 138 | if (PCI_FUNC(pdev->devfn) != 1) | 138 | if (PCI_FUNC(pdev->devfn) != 1) |
| 139 | return -ENODEV; | 139 | return -ENODEV; |
| 140 | 140 | ||
| 141 | return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); | 141 | return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | static const struct pci_device_id cy82c693[] = { | 144 | static const struct pci_device_id cy82c693[] = { |
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 3bac0e079691..a08834758ea2 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c | |||
| @@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 277 | dev_printk(KERN_DEBUG, &pdev->dev, | 277 | dev_printk(KERN_DEBUG, &pdev->dev, |
| 278 | "version " DRV_VERSION "\n"); | 278 | "version " DRV_VERSION "\n"); |
| 279 | 279 | ||
| 280 | return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL, | 280 | return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL, |
| 281 | ATA_HOST_PARALLEL_SCAN); | 281 | ATA_HOST_PARALLEL_SCAN); |
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | static const struct pci_device_id efar_pci_tbl[] = { | 284 | static const struct pci_device_id efar_pci_tbl[] = { |
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 8580eb3cd54d..7688868557b9 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c | |||
| @@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 361 | break; | 361 | break; |
| 362 | } | 362 | } |
| 363 | /* Now kick off ATA set up */ | 363 | /* Now kick off ATA set up */ |
| 364 | return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); | 364 | return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); |
| 365 | } | 365 | } |
| 366 | 366 | ||
| 367 | #ifdef CONFIG_PM | 367 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 98b498b6907c..9ae4c0830577 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c | |||
| @@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 987 | } | 987 | } |
| 988 | 988 | ||
| 989 | /* Now kick off ATA set up */ | 989 | /* Now kick off ATA set up */ |
| 990 | return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0); | 990 | return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); |
| 991 | } | 991 | } |
| 992 | 992 | ||
| 993 | static const struct pci_device_id hpt37x[] = { | 993 | static const struct pci_device_id hpt37x[] = { |
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 8b95aeba0e74..32f3463216b8 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c | |||
| @@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 548 | outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); | 548 | outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); |
| 549 | 549 | ||
| 550 | /* Now kick off ATA set up */ | 550 | /* Now kick off ATA set up */ |
| 551 | return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); | 551 | return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | static const struct pci_device_id hpt3x2n[] = { | 554 | static const struct pci_device_id hpt3x2n[] = { |
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index 727a81ce4c9f..b63d5e2d4628 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c | |||
| @@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 248 | ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); | 248 | ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); |
| 249 | } | 249 | } |
| 250 | pci_set_master(pdev); | 250 | pci_set_master(pdev); |
| 251 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 251 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 252 | IRQF_SHARED, &hpt3x3_sht); | 252 | IRQF_SHARED, &hpt3x3_sht); |
| 253 | } | 253 | } |
| 254 | 254 | ||
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index b56e8f722d20..9f2889fe43b2 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c | |||
| @@ -470,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) | |||
| 470 | pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); | 470 | pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); |
| 471 | } | 471 | } |
| 472 | 472 | ||
| 473 | return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, | 473 | return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0, |
| 474 | &pata_icside_sht); | 474 | &pata_icside_sht); |
| 475 | } | 475 | } |
| 476 | 476 | ||
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index f971f0de88e6..4d142a2ab8fd 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c | |||
| @@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en | |||
| 273 | dev_printk(KERN_DEBUG, &pdev->dev, | 273 | dev_printk(KERN_DEBUG, &pdev->dev, |
| 274 | "version " DRV_VERSION "\n"); | 274 | "version " DRV_VERSION "\n"); |
| 275 | 275 | ||
| 276 | return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0); | 276 | return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0); |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | static const struct pci_device_id it8213_pci_tbl[] = { | 279 | static const struct pci_device_id it8213_pci_tbl[] = { |
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 2bd2b002d14a..bf88f71a21f4 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c | |||
| @@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 933 | else | 933 | else |
| 934 | ppi[0] = &info_smart; | 934 | ppi[0] = &info_smart; |
| 935 | } | 935 | } |
| 936 | return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0); | 936 | return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0); |
| 937 | } | 937 | } |
| 938 | 938 | ||
| 939 | #ifdef CONFIG_PM | 939 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 565e01e6ac7c..cb3babbb7035 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c | |||
| @@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
| 144 | }; | 144 | }; |
| 145 | const struct ata_port_info *ppi[] = { &info, NULL }; | 145 | const struct ata_port_info *ppi[] = { &info, NULL }; |
| 146 | 146 | ||
| 147 | return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); | 147 | return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | static const struct pci_device_id jmicron_pci_tbl[] = { | 150 | static const struct pci_device_id jmicron_pci_tbl[] = { |
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index b5b48e703cb7..76640ac76888 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c | |||
| @@ -1110,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv, | |||
| 1110 | 1110 | ||
| 1111 | /* Start it up */ | 1111 | /* Start it up */ |
| 1112 | priv->irq = irq; | 1112 | priv->irq = irq; |
| 1113 | return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, | 1113 | return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0, |
| 1114 | &pata_macio_sht); | 1114 | &pata_macio_sht); |
| 1115 | } | 1115 | } |
| 1116 | 1116 | ||
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index e8ca02e5a71d..dd38083dcbeb 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c | |||
| @@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
| 153 | return -ENODEV; | 153 | return -ENODEV; |
| 154 | } | 154 | } |
| 155 | #endif | 155 | #endif |
| 156 | return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0); | 156 | return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0); |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | static const struct pci_device_id marvell_pci_tbl[] = { | 159 | static const struct pci_device_id marvell_pci_tbl[] = { |
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 36afe2c1c747..f087ab55b1df 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c | |||
| @@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, | |||
| 659 | ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); | 659 | ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); |
| 660 | 660 | ||
| 661 | /* activate host */ | 661 | /* activate host */ |
| 662 | return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, | 662 | return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0, |
| 663 | &mpc52xx_ata_sht); | 663 | &mpc52xx_ata_sht); |
| 664 | } | 664 | } |
| 665 | 665 | ||
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 94f979a7f4f7..3eb921c746a1 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c | |||
| @@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
| 82 | ata_pci_bmdma_clear_simplex(pdev); | 82 | ata_pci_bmdma_clear_simplex(pdev); |
| 83 | 83 | ||
| 84 | /* And let the library code do the work */ | 84 | /* And let the library code do the work */ |
| 85 | return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0); | 85 | return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0); |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static const struct pci_device_id netcell_pci_tbl[] = { | 88 | static const struct pci_device_id netcell_pci_tbl[] = { |
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index dd53a66b19e3..cc50bd09aa26 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c | |||
| @@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 149 | 149 | ||
| 150 | ninja32_program(base); | 150 | ninja32_program(base); |
| 151 | /* FIXME: Should we disable them at remove ? */ | 151 | /* FIXME: Should we disable them at remove ? */ |
| 152 | return ata_host_activate(host, dev->irq, ata_sff_interrupt, | 152 | return ata_host_activate(host, dev->irq, ata_bmdma_interrupt, |
| 153 | IRQF_SHARED, &ninja32_sht); | 153 | IRQF_SHARED, &ninja32_sht); |
| 154 | } | 154 | } |
| 155 | 155 | ||
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index fdbba2d76d3e..605f198f958c 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c | |||
| @@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
| 380 | 380 | ||
| 381 | ns87415_fixup(pdev); | 381 | ns87415_fixup(pdev); |
| 382 | 382 | ||
| 383 | return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0); | 383 | return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0); |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | static const struct pci_device_id ns87415_pci_tbl[] = { | 386 | static const struct pci_device_id ns87415_pci_tbl[] = { |
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 3001109352ea..06ddd91ffeda 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c | |||
| @@ -750,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev) | |||
| 750 | } | 750 | } |
| 751 | 751 | ||
| 752 | /* | 752 | /* |
| 753 | * Trap if driver tries to do standard bmdma commands. They are not | ||
| 754 | * supported. | ||
| 755 | */ | ||
| 756 | static void unreachable_qc(struct ata_queued_cmd *qc) | ||
| 757 | { | ||
| 758 | BUG(); | ||
| 759 | } | ||
| 760 | |||
| 761 | static u8 unreachable_port(struct ata_port *ap) | ||
| 762 | { | ||
| 763 | BUG(); | ||
| 764 | } | ||
| 765 | |||
| 766 | /* | ||
| 767 | * We don't do ATAPI DMA so return 0. | 753 | * We don't do ATAPI DMA so return 0. |
| 768 | */ | 754 | */ |
| 769 | static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) | 755 | static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) |
| @@ -804,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = { | |||
| 804 | .sff_dev_select = octeon_cf_dev_select, | 790 | .sff_dev_select = octeon_cf_dev_select, |
| 805 | .sff_irq_on = octeon_cf_irq_on, | 791 | .sff_irq_on = octeon_cf_irq_on, |
| 806 | .sff_irq_clear = octeon_cf_irq_clear, | 792 | .sff_irq_clear = octeon_cf_irq_clear, |
| 807 | .bmdma_setup = unreachable_qc, | ||
| 808 | .bmdma_start = unreachable_qc, | ||
| 809 | .bmdma_stop = unreachable_qc, | ||
| 810 | .bmdma_status = unreachable_port, | ||
| 811 | .cable_detect = ata_cable_40wire, | 793 | .cable_detect = ata_cable_40wire, |
| 812 | .set_piomode = octeon_cf_set_piomode, | 794 | .set_piomode = octeon_cf_set_piomode, |
| 813 | .set_dmamode = octeon_cf_set_dmamode, | 795 | .set_dmamode = octeon_cf_set_dmamode, |
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 988ef2627be3..b811c1636204 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c | |||
| @@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
| 248 | dev_printk(KERN_DEBUG, &pdev->dev, | 248 | dev_printk(KERN_DEBUG, &pdev->dev, |
| 249 | "version " DRV_VERSION "\n"); | 249 | "version " DRV_VERSION "\n"); |
| 250 | 250 | ||
| 251 | return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); | 251 | return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); |
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | static const struct pci_device_id oldpiix_pci_tbl[] = { | 254 | static const struct pci_device_id oldpiix_pci_tbl[] = { |
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 76b7d12b1e8d..0852cd07de08 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c | |||
| @@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 429 | if (optiplus_with_udma(dev)) | 429 | if (optiplus_with_udma(dev)) |
| 430 | ppi[0] = &info_82c700_udma; | 430 | ppi[0] = &info_82c700_udma; |
| 431 | 431 | ||
| 432 | return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0); | 432 | return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0); |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static const struct pci_device_id optidma[] = { | 435 | static const struct pci_device_id optidma[] = { |
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 09f1f22c0307..b18351122525 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
| @@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de | |||
| 754 | return -EIO; | 754 | return -EIO; |
| 755 | 755 | ||
| 756 | pci_set_master(pdev); | 756 | pci_set_master(pdev); |
| 757 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 757 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 758 | IRQF_SHARED, &pdc2027x_sht); | 758 | IRQF_SHARED, &pdc2027x_sht); |
| 759 | } | 759 | } |
| 760 | 760 | ||
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index fa1e2f3bc0fd..c39f213e1bbc 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c | |||
| @@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
| 337 | return -ENODEV; | 337 | return -ENODEV; |
| 338 | } | 338 | } |
| 339 | } | 339 | } |
| 340 | return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); | 340 | return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); |
| 341 | } | 341 | } |
| 342 | 342 | ||
| 343 | static const struct pci_device_id pdc202xx[] = { | 343 | static const struct pci_device_id pdc202xx[] = { |
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c index 981615414849..cb01bf9496fe 100644 --- a/drivers/ata/pata_piccolo.c +++ b/drivers/ata/pata_piccolo.c | |||
| @@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
| 95 | }; | 95 | }; |
| 96 | const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; | 96 | const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; |
| 97 | /* Just one port for the moment */ | 97 | /* Just one port for the moment */ |
| 98 | return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0); | 98 | return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0); |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static struct pci_device_id ata_tosh[] = { | 101 | static struct pci_device_id ata_tosh[] = { |
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index a5fa388e5398..8574b31f1773 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c | |||
| @@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
| 227 | dev_printk(KERN_DEBUG, &pdev->dev, | 227 | dev_printk(KERN_DEBUG, &pdev->dev, |
| 228 | "version " DRV_VERSION "\n"); | 228 | "version " DRV_VERSION "\n"); |
| 229 | 229 | ||
| 230 | return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0); | 230 | return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0); |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | static const struct pci_device_id radisys_pci_tbl[] = { | 233 | static const struct pci_device_id radisys_pci_tbl[] = { |
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c index 37092cfd7bc6..5fbe9b166c69 100644 --- a/drivers/ata/pata_rdc.c +++ b/drivers/ata/pata_rdc.c | |||
| @@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, | |||
| 344 | */ | 344 | */ |
| 345 | pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); | 345 | pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); |
| 346 | 346 | ||
| 347 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 347 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
| 348 | if (rc) | 348 | if (rc) |
| 349 | return rc; | 349 | return rc; |
| 350 | host->private_data = hpriv; | 350 | host->private_data = hpriv; |
| @@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, | |||
| 354 | host->flags |= ATA_HOST_PARALLEL_SCAN; | 354 | host->flags |= ATA_HOST_PARALLEL_SCAN; |
| 355 | 355 | ||
| 356 | pci_set_master(pdev); | 356 | pci_set_master(pdev); |
| 357 | return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); | 357 | return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht); |
| 358 | } | 358 | } |
| 359 | 359 | ||
| 360 | static void rdc_remove_one(struct pci_dev *pdev) | 360 | static void rdc_remove_one(struct pci_dev *pdev) |
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index 6b5b63a2fd8e..e2c18257adff 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c | |||
| @@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 237 | }; | 237 | }; |
| 238 | const struct ata_port_info *ppi[] = { &info, NULL }; | 238 | const struct ata_port_info *ppi[] = { &info, NULL }; |
| 239 | 239 | ||
| 240 | return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0); | 240 | return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0); |
| 241 | } | 241 | } |
| 242 | 242 | ||
| 243 | static const struct pci_device_id sc1200[] = { | 243 | static const struct pci_device_id sc1200[] = { |
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 6f6193b707cb..d9db3f8d60ef 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c | |||
| @@ -875,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes) | |||
| 875 | * scc_irq_clear - Clear PCI IDE BMDMA interrupt. | 875 | * scc_irq_clear - Clear PCI IDE BMDMA interrupt. |
| 876 | * @ap: Port associated with this ATA transaction. | 876 | * @ap: Port associated with this ATA transaction. |
| 877 | * | 877 | * |
| 878 | * Note: Original code is ata_sff_irq_clear(). | 878 | * Note: Original code is ata_bmdma_irq_clear(). |
| 879 | */ | 879 | */ |
| 880 | 880 | ||
| 881 | static void scc_irq_clear (struct ata_port *ap) | 881 | static void scc_irq_clear (struct ata_port *ap) |
| @@ -1105,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1105 | if (rc) | 1105 | if (rc) |
| 1106 | return rc; | 1106 | return rc; |
| 1107 | 1107 | ||
| 1108 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 1108 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 1109 | IRQF_SHARED, &scc_sht); | 1109 | IRQF_SHARED, &scc_sht); |
| 1110 | } | 1110 | } |
| 1111 | 1111 | ||
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c index 86b3d0133c7c..e97b32f03a6e 100644 --- a/drivers/ata/pata_sch.c +++ b/drivers/ata/pata_sch.c | |||
| @@ -179,7 +179,7 @@ static int __devinit sch_init_one(struct pci_dev *pdev, | |||
| 179 | dev_printk(KERN_DEBUG, &pdev->dev, | 179 | dev_printk(KERN_DEBUG, &pdev->dev, |
| 180 | "version " DRV_VERSION "\n"); | 180 | "version " DRV_VERSION "\n"); |
| 181 | 181 | ||
| 182 | return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0); | 182 | return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0); |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | static int __init sch_init(void) | 185 | static int __init sch_init(void) |
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 43ea389df2b3..86dd714e3e1d 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c | |||
| @@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id | |||
| 460 | if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) | 460 | if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) |
| 461 | ata_pci_bmdma_clear_simplex(pdev); | 461 | ata_pci_bmdma_clear_simplex(pdev); |
| 462 | 462 | ||
| 463 | return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0); | 463 | return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); |
| 464 | } | 464 | } |
| 465 | 465 | ||
| 466 | #ifdef CONFIG_PM | 466 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 43faf106f647..d3190d7ec304 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c | |||
| @@ -374,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, | |||
| 374 | ata_sff_std_ports(&host->ports[1]->ioaddr); | 374 | ata_sff_std_ports(&host->ports[1]->ioaddr); |
| 375 | 375 | ||
| 376 | /* Register & activate */ | 376 | /* Register & activate */ |
| 377 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 377 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 378 | IRQF_SHARED, &sil680_sht); | 378 | IRQF_SHARED, &sil680_sht); |
| 379 | 379 | ||
| 380 | use_ioports: | 380 | use_ioports: |
| 381 | return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0); | 381 | return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0); |
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | #ifdef CONFIG_PM | 384 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index b6708032f321..60cea13cccce 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c | |||
| @@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 826 | 826 | ||
| 827 | sis_fixup(pdev, chipset); | 827 | sis_fixup(pdev, chipset); |
| 828 | 828 | ||
| 829 | return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0); | 829 | return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0); |
| 830 | } | 830 | } |
| 831 | 831 | ||
| 832 | #ifdef CONFIG_PM | 832 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 733b042a7469..98548f640c8e 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c | |||
| @@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
| 316 | val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; | 316 | val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; |
| 317 | pci_write_config_dword(dev, 0x40, val); | 317 | pci_write_config_dword(dev, 0x40, val); |
| 318 | 318 | ||
| 319 | return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0); | 319 | return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); |
| 320 | } | 320 | } |
| 321 | 321 | ||
| 322 | static const struct pci_device_id sl82c105[] = { | 322 | static const struct pci_device_id sl82c105[] = { |
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 48f50600ed2a..0d1f89e571dd 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c | |||
| @@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 201 | if (!printed_version++) | 201 | if (!printed_version++) |
| 202 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); | 202 | dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); |
| 203 | 203 | ||
| 204 | return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0); | 204 | return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0); |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | static const struct pci_device_id triflex[] = { | 207 | static const struct pci_device_id triflex[] = { |
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 7e3e0a5598b7..5e659885de16 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
| @@ -627,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | /* We have established the device type, now fire it up */ | 629 | /* We have established the device type, now fire it up */ |
| 630 | return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0); | 630 | return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0); |
| 631 | } | 631 | } |
| 632 | 632 | ||
| 633 | #ifdef CONFIG_PM | 633 | #ifdef CONFIG_PM |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f3471bc949d3..a476cd99b95d 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
| @@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = { | |||
| 675 | .freeze = mv_eh_freeze, | 675 | .freeze = mv_eh_freeze, |
| 676 | .thaw = mv_eh_thaw, | 676 | .thaw = mv_eh_thaw, |
| 677 | .hardreset = mv_hardreset, | 677 | .hardreset = mv_hardreset, |
| 678 | .error_handler = ata_std_error_handler, /* avoid SFF EH */ | ||
| 679 | .post_internal_cmd = ATA_OP_NULL, | ||
| 680 | 678 | ||
| 681 | .scr_read = mv5_scr_read, | 679 | .scr_read = mv5_scr_read, |
| 682 | .scr_write = mv5_scr_write, | 680 | .scr_write = mv5_scr_write, |
| @@ -2813,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause) | |||
| 2813 | } else if (!edma_was_enabled) { | 2811 | } else if (!edma_was_enabled) { |
| 2814 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | 2812 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); |
| 2815 | if (qc) | 2813 | if (qc) |
| 2816 | ata_sff_host_intr(ap, qc); | 2814 | ata_bmdma_port_intr(ap, qc); |
| 2817 | else | 2815 | else |
| 2818 | mv_unexpected_intr(ap, edma_was_enabled); | 2816 | mv_unexpected_intr(ap, edma_was_enabled); |
| 2819 | } | 2817 | } |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index baa8f0d2c86f..6fd114784116 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
| @@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) | |||
| 920 | } | 920 | } |
| 921 | 921 | ||
| 922 | /* handle interrupt */ | 922 | /* handle interrupt */ |
| 923 | return ata_sff_host_intr(ap, qc); | 923 | return ata_bmdma_port_intr(ap, qc); |
| 924 | } | 924 | } |
| 925 | 925 | ||
| 926 | static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | 926 | static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) |
| @@ -1100,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) | |||
| 1100 | u32 notifier_clears[2]; | 1100 | u32 notifier_clears[2]; |
| 1101 | 1101 | ||
| 1102 | if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { | 1102 | if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { |
| 1103 | ata_sff_irq_clear(ap); | 1103 | ata_bmdma_irq_clear(ap); |
| 1104 | return; | 1104 | return; |
| 1105 | } | 1105 | } |
| 1106 | 1106 | ||
| @@ -1505,7 +1505,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) | |||
| 1505 | 1505 | ||
| 1506 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1506 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
| 1507 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { | 1507 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
| 1508 | handled += ata_sff_host_intr(ap, qc); | 1508 | handled += ata_bmdma_port_intr(ap, qc); |
| 1509 | } else { | 1509 | } else { |
| 1510 | /* | 1510 | /* |
| 1511 | * No request pending? Clear interrupt status | 1511 | * No request pending? Clear interrupt status |
| @@ -2430,7 +2430,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2430 | 2430 | ||
| 2431 | ppi[0] = &nv_port_info[type]; | 2431 | ppi[0] = &nv_port_info[type]; |
| 2432 | ipriv = ppi[0]->private_data; | 2432 | ipriv = ppi[0]->private_data; |
| 2433 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 2433 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
| 2434 | if (rc) | 2434 | if (rc) |
| 2435 | return rc; | 2435 | return rc; |
| 2436 | 2436 | ||
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index d533b3d20ca1..daeebf19a6a9 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c | |||
| @@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host); | |||
| 120 | static void qs_qc_prep(struct ata_queued_cmd *qc); | 120 | static void qs_qc_prep(struct ata_queued_cmd *qc); |
| 121 | static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); | 121 | static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); |
| 122 | static int qs_check_atapi_dma(struct ata_queued_cmd *qc); | 122 | static int qs_check_atapi_dma(struct ata_queued_cmd *qc); |
| 123 | static void qs_bmdma_stop(struct ata_queued_cmd *qc); | ||
| 124 | static u8 qs_bmdma_status(struct ata_port *ap); | ||
| 125 | static void qs_freeze(struct ata_port *ap); | 123 | static void qs_freeze(struct ata_port *ap); |
| 126 | static void qs_thaw(struct ata_port *ap); | 124 | static void qs_thaw(struct ata_port *ap); |
| 127 | static int qs_prereset(struct ata_link *link, unsigned long deadline); | 125 | static int qs_prereset(struct ata_link *link, unsigned long deadline); |
| @@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = { | |||
| 137 | .inherits = &ata_sff_port_ops, | 135 | .inherits = &ata_sff_port_ops, |
| 138 | 136 | ||
| 139 | .check_atapi_dma = qs_check_atapi_dma, | 137 | .check_atapi_dma = qs_check_atapi_dma, |
| 140 | .bmdma_stop = qs_bmdma_stop, | ||
| 141 | .bmdma_status = qs_bmdma_status, | ||
| 142 | .qc_prep = qs_qc_prep, | 138 | .qc_prep = qs_qc_prep, |
| 143 | .qc_issue = qs_qc_issue, | 139 | .qc_issue = qs_qc_issue, |
| 144 | 140 | ||
| @@ -190,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc) | |||
| 190 | return 1; /* ATAPI DMA not supported */ | 186 | return 1; /* ATAPI DMA not supported */ |
| 191 | } | 187 | } |
| 192 | 188 | ||
| 193 | static void qs_bmdma_stop(struct ata_queued_cmd *qc) | ||
| 194 | { | ||
| 195 | /* nothing */ | ||
| 196 | } | ||
| 197 | |||
| 198 | static u8 qs_bmdma_status(struct ata_port *ap) | ||
| 199 | { | ||
| 200 | return 0; | ||
| 201 | } | ||
| 202 | |||
| 203 | static inline void qs_enter_reg_mode(struct ata_port *ap) | 189 | static inline void qs_enter_reg_mode(struct ata_port *ap) |
| 204 | { | 190 | { |
| 205 | u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); | 191 | u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); |
| @@ -454,7 +440,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) | |||
| 454 | if (!pp || pp->state != qs_state_mmio) | 440 | if (!pp || pp->state != qs_state_mmio) |
| 455 | continue; | 441 | continue; |
| 456 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) | 442 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) |
| 457 | handled |= ata_sff_host_intr(ap, qc); | 443 | handled |= ata_sff_port_intr(ap, qc); |
| 458 | } | 444 | } |
| 459 | return handled; | 445 | return handled; |
| 460 | } | 446 | } |
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 2dda312b6b9a..3a4f84219719 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
| @@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
| 503 | goto err_hsm; | 503 | goto err_hsm; |
| 504 | 504 | ||
| 505 | /* ack bmdma irq events */ | 505 | /* ack bmdma irq events */ |
| 506 | ata_sff_irq_clear(ap); | 506 | ata_bmdma_irq_clear(ap); |
| 507 | 507 | ||
| 508 | /* kick HSM in the ass */ | 508 | /* kick HSM in the ass */ |
| 509 | ata_sff_hsm_move(ap, qc, status, 0); | 509 | ata_sff_hsm_move(ap, qc, status, 0); |
| @@ -584,7 +584,7 @@ static void sil_thaw(struct ata_port *ap) | |||
| 584 | 584 | ||
| 585 | /* clear IRQ */ | 585 | /* clear IRQ */ |
| 586 | ap->ops->sff_check_status(ap); | 586 | ap->ops->sff_check_status(ap); |
| 587 | ata_sff_irq_clear(ap); | 587 | ata_bmdma_irq_clear(ap); |
| 588 | 588 | ||
| 589 | /* turn on SATA IRQ if supported */ | 589 | /* turn on SATA IRQ if supported */ |
| 590 | if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) | 590 | if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) |
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index f8a91bfd66a8..2bfe3ae03976 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
| @@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 279 | break; | 279 | break; |
| 280 | } | 280 | } |
| 281 | 281 | ||
| 282 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 282 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
| 283 | if (rc) | 283 | if (rc) |
| 284 | return rc; | 284 | return rc; |
| 285 | 285 | ||
| @@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 308 | 308 | ||
| 309 | pci_set_master(pdev); | 309 | pci_set_master(pdev); |
| 310 | pci_intx(pdev, 1); | 310 | pci_intx(pdev, 1); |
| 311 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 311 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 312 | IRQF_SHARED, &sis_sht); | 312 | IRQF_SHARED, &sis_sht); |
| 313 | } | 313 | } |
| 314 | 314 | ||
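The sata_sis hunk above shows both halves of the SFF-to-BMDMA rename in one place: host preparation and interrupt hookup. Below is a minimal sketch of the resulting init pattern, assuming only the libata calls that appear in this patch; my_init_one, my_port_info and my_sht are hypothetical placeholders, not names from any driver touched here.

    /* Hypothetical driver objects; my_port_info and my_sht would be
     * defined elsewhere in the driver. */
    static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
    	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
    	struct ata_host *host;
    	int rc;

    	/* allocate a BMDMA host rather than a bare SFF one */
    	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
    	if (rc)
    		return rc;

    	pci_set_master(pdev);
    	pci_intx(pdev, 1);

    	/* BMDMA hosts now hook ata_bmdma_interrupt, not ata_sff_interrupt */
    	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
    				 IRQF_SHARED, &my_sht);
    }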
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 101fd6a19829..7d9db4aaf07e 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
| @@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
| 502 | writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); | 502 | writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); |
| 503 | 503 | ||
| 504 | pci_set_master(pdev); | 504 | pci_set_master(pdev); |
| 505 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 505 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 506 | IRQF_SHARED, &k2_sata_sht); | 506 | IRQF_SHARED, &k2_sata_sht); |
| 507 | } | 507 | } |
| 508 | 508 | ||
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index d8dac17dc2c8..b8578c32d344 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c | |||
| @@ -242,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 242 | 242 | ||
| 243 | pci_set_master(pdev); | 243 | pci_set_master(pdev); |
| 244 | pci_intx(pdev, 1); | 244 | pci_intx(pdev, 1); |
| 245 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 245 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 246 | IRQF_SHARED, &uli_sht); | 246 | IRQF_SHARED, &uli_sht); |
| 247 | } | 247 | } |
| 248 | 248 | ||
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 08f65492cc81..101d8c219caf 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
| @@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap) | |||
| 308 | * certain way. Leave it alone and just clear pending IRQ. | 308 | * certain way. Leave it alone and just clear pending IRQ. |
| 309 | */ | 309 | */ |
| 310 | ap->ops->sff_check_status(ap); | 310 | ap->ops->sff_check_status(ap); |
| 311 | ata_sff_irq_clear(ap); | 311 | ata_bmdma_irq_clear(ap); |
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | /** | 314 | /** |
| @@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) | |||
| 463 | struct ata_host *host; | 463 | struct ata_host *host; |
| 464 | int rc; | 464 | int rc; |
| 465 | 465 | ||
| 466 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 466 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
| 467 | if (rc) | 467 | if (rc) |
| 468 | return rc; | 468 | return rc; |
| 469 | *r_host = host; | 469 | *r_host = host; |
| @@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) | |||
| 520 | struct ata_host *host; | 520 | struct ata_host *host; |
| 521 | int i, rc; | 521 | int i, rc; |
| 522 | 522 | ||
| 523 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 523 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
| 524 | if (rc) | 524 | if (rc) |
| 525 | return rc; | 525 | return rc; |
| 526 | *r_host = host; | 526 | *r_host = host; |
| @@ -628,7 +628,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 628 | svia_configure(pdev); | 628 | svia_configure(pdev); |
| 629 | 629 | ||
| 630 | pci_set_master(pdev); | 630 | pci_set_master(pdev); |
| 631 | return ata_host_activate(host, pdev->irq, ata_sff_interrupt, | 631 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| 632 | IRQF_SHARED, &svia_sht); | 632 | IRQF_SHARED, &svia_sht); |
| 633 | } | 633 | } |
| 634 | 634 | ||
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 2107952ebff1..b777176ff494 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c | |||
| @@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) | |||
| 245 | 245 | ||
| 246 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 246 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
| 247 | if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) | 247 | if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) |
| 248 | handled = ata_sff_host_intr(ap, qc); | 248 | handled = ata_bmdma_port_intr(ap, qc); |
| 249 | 249 | ||
| 250 | /* We received an interrupt during a polled command, | 250 | /* We received an interrupt during a polled command, |
| 251 | * or some other spurious condition. Interrupt reporting | 251 | * or some other spurious condition. Interrupt reporting |
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c index 606048b72bcf..85c004a518ee 100644 --- a/drivers/char/ps3flash.c +++ b/drivers/char/ps3flash.c | |||
| @@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id) | |||
| 305 | return ps3flash_writeback(ps3flash_dev); | 305 | return ps3flash_writeback(ps3flash_dev); |
| 306 | } | 306 | } |
| 307 | 307 | ||
| 308 | static int ps3flash_fsync(struct file *file, struct dentry *dentry, | 308 | static int ps3flash_fsync(struct file *file, int datasync) |
| 309 | int datasync) | ||
| 310 | { | 309 | { |
| 311 | return ps3flash_writeback(ps3flash_dev); | 310 | return ps3flash_writeback(ps3flash_dev); |
| 312 | } | 311 | } |
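The ps3flash hunk tracks a VFS prototype change: ->fsync no longer takes a dentry, only the file and a datasync flag. A minimal sketch, assuming the two-argument prototype shown above, of how a driver wires the simplified callback into its file_operations; my_fsync and my_fops are hypothetical names.

    #include <linux/fs.h>
    #include <linux/module.h>

    static int my_fsync(struct file *file, int datasync)
    {
    	/* flush device state here, as ps3flash_writeback() does above */
    	return 0;
    }

    static const struct file_operations my_fops = {
    	.owner	= THIS_MODULE,
    	.fsync	= my_fsync,	/* now (struct file *, int datasync) only */
    	/* other handlers omitted */
    };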
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 12fdd3987a36..199488576a05 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
| @@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
| 156 | 156 | ||
| 157 | if (dev->enabled) | 157 | if (dev->enabled) |
| 158 | return 0; | 158 | return 0; |
| 159 | if (!cpuidle_curr_driver || !cpuidle_curr_governor) | 159 | if (!cpuidle_get_driver() || !cpuidle_curr_governor) |
| 160 | return -EIO; | 160 | return -EIO; |
| 161 | if (!dev->state_count) | 161 | if (!dev->state_count) |
| 162 | return -EINVAL; | 162 | return -EINVAL; |
| @@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) | |||
| 207 | { | 207 | { |
| 208 | if (!dev->enabled) | 208 | if (!dev->enabled) |
| 209 | return; | 209 | return; |
| 210 | if (!cpuidle_curr_driver || !cpuidle_curr_governor) | 210 | if (!cpuidle_get_driver() || !cpuidle_curr_governor) |
| 211 | return; | 211 | return; |
| 212 | 212 | ||
| 213 | dev->enabled = 0; | 213 | dev->enabled = 0; |
| @@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
| 271 | { | 271 | { |
| 272 | int ret; | 272 | int ret; |
| 273 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); | 273 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); |
| 274 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
| 274 | 275 | ||
| 275 | if (!sys_dev) | 276 | if (!sys_dev) |
| 276 | return -EINVAL; | 277 | return -EINVAL; |
| 277 | if (!try_module_get(cpuidle_curr_driver->owner)) | 278 | if (!try_module_get(cpuidle_driver->owner)) |
| 278 | return -EINVAL; | 279 | return -EINVAL; |
| 279 | 280 | ||
| 280 | init_completion(&dev->kobj_unregister); | 281 | init_completion(&dev->kobj_unregister); |
| @@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
| 284 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 285 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
| 285 | list_add(&dev->device_list, &cpuidle_detected_devices); | 286 | list_add(&dev->device_list, &cpuidle_detected_devices); |
| 286 | if ((ret = cpuidle_add_sysfs(sys_dev))) { | 287 | if ((ret = cpuidle_add_sysfs(sys_dev))) { |
| 287 | module_put(cpuidle_curr_driver->owner); | 288 | module_put(cpuidle_driver->owner); |
| 288 | return ret; | 289 | return ret; |
| 289 | } | 290 | } |
| 290 | 291 | ||
| @@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); | |||
| 325 | void cpuidle_unregister_device(struct cpuidle_device *dev) | 326 | void cpuidle_unregister_device(struct cpuidle_device *dev) |
| 326 | { | 327 | { |
| 327 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); | 328 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); |
| 329 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
| 328 | 330 | ||
| 329 | if (dev->registered == 0) | 331 | if (dev->registered == 0) |
| 330 | return; | 332 | return; |
| @@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) | |||
| 340 | 342 | ||
| 341 | cpuidle_resume_and_unlock(); | 343 | cpuidle_resume_and_unlock(); |
| 342 | 344 | ||
| 343 | module_put(cpuidle_curr_driver->owner); | 345 | module_put(cpuidle_driver->owner); |
| 344 | } | 346 | } |
| 345 | 347 | ||
| 346 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); | 348 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); |
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 9476ba33ee2c..33e50d556f17 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | 9 | ||
| 10 | /* For internal use only */ | 10 | /* For internal use only */ |
| 11 | extern struct cpuidle_governor *cpuidle_curr_governor; | 11 | extern struct cpuidle_governor *cpuidle_curr_governor; |
| 12 | extern struct cpuidle_driver *cpuidle_curr_driver; | ||
| 13 | extern struct list_head cpuidle_governors; | 12 | extern struct list_head cpuidle_governors; |
| 14 | extern struct list_head cpuidle_detected_devices; | 13 | extern struct list_head cpuidle_detected_devices; |
| 15 | extern struct mutex cpuidle_lock; | 14 | extern struct mutex cpuidle_lock; |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 2257004fe33d..fd1601e3d125 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #include "cpuidle.h" | 15 | #include "cpuidle.h" |
| 16 | 16 | ||
| 17 | struct cpuidle_driver *cpuidle_curr_driver; | 17 | static struct cpuidle_driver *cpuidle_curr_driver; |
| 18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 18 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
| 19 | 19 | ||
| 20 | /** | 20 | /** |
| @@ -40,13 +40,25 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
| 40 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | 40 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); |
| 41 | 41 | ||
| 42 | /** | 42 | /** |
| 43 | * cpuidle_get_driver - return the current driver | ||
| 44 | */ | ||
| 45 | struct cpuidle_driver *cpuidle_get_driver(void) | ||
| 46 | { | ||
| 47 | return cpuidle_curr_driver; | ||
| 48 | } | ||
| 49 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | ||
| 50 | |||
| 51 | /** | ||
| 43 | * cpuidle_unregister_driver - unregisters a driver | 52 | * cpuidle_unregister_driver - unregisters a driver |
| 44 | * @drv: the driver | 53 | * @drv: the driver |
| 45 | */ | 54 | */ |
| 46 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) | 55 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
| 47 | { | 56 | { |
| 48 | if (!drv) | 57 | if (drv != cpuidle_curr_driver) { |
| 58 | WARN(1, "invalid cpuidle_unregister_driver(%s)\n", | ||
| 59 | drv->name); | ||
| 49 | return; | 60 | return; |
| 61 | } | ||
| 50 | 62 | ||
| 51 | spin_lock(&cpuidle_driver_lock); | 63 | spin_lock(&cpuidle_driver_lock); |
| 52 | cpuidle_curr_driver = NULL; | 64 | cpuidle_curr_driver = NULL; |
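With cpuidle_curr_driver now static to driver.c, outside code reaches the active driver only through the new accessor. A minimal sketch of a caller, assuming cpuidle_get_driver() is declared in <linux/cpuidle.h> alongside the rest of the cpuidle API; my_report_driver is a hypothetical helper.

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/cpuidle.h>

    static int my_report_driver(void)
    {
    	struct cpuidle_driver *drv = cpuidle_get_driver();

    	if (!drv)
    		return -EIO;	/* no cpuidle driver registered */

    	pr_info("active cpuidle driver: %s\n", drv->name);

    	/* pin the driver module, mirroring __cpuidle_register_device() above */
    	return try_module_get(drv->owner) ? 0 : -EINVAL;
    }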
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 0ba9c8b8ee74..0310ffaec9df 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
| @@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class, | |||
| 47 | char *buf) | 47 | char *buf) |
| 48 | { | 48 | { |
| 49 | ssize_t ret; | 49 | ssize_t ret; |
| 50 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
| 50 | 51 | ||
| 51 | spin_lock(&cpuidle_driver_lock); | 52 | spin_lock(&cpuidle_driver_lock); |
| 52 | if (cpuidle_curr_driver) | 53 | if (cpuidle_driver) |
| 53 | ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); | 54 | ret = sprintf(buf, "%s\n", cpuidle_driver->name); |
| 54 | else | 55 | else |
| 55 | ret = sprintf(buf, "none\n"); | 56 | ret = sprintf(buf, "none\n"); |
| 56 | spin_unlock(&cpuidle_driver_lock); | 57 | spin_unlock(&cpuidle_driver_lock); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1b8877922fb0..9e01e96fee94 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
| @@ -166,6 +166,15 @@ config TIMB_DMA | |||
| 166 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 166 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
| 167 | bool | 167 | bool |
| 168 | 168 | ||
| 169 | config PL330_DMA | ||
| 170 | tristate "DMA API Driver for PL330" | ||
| 171 | select DMA_ENGINE | ||
| 172 | depends on PL330 | ||
| 173 | help | ||
| 174 | Select if your platform has one or more PL330 DMACs. | ||
| 175 | You need to provide platform-specific settings via | ||
| 176 | platform_data for a dma-pl330 device. | ||
| 177 | |||
| 169 | config DMA_ENGINE | 178 | config DMA_ENGINE |
| 170 | bool | 179 | bool |
| 171 | 180 | ||
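The help text above says a dma-pl330 device must be fed platform-specific settings through platform_data. A hedged sketch of what board code might supply, with field names inferred from how pl330_probe() and pl330_prep_slave_sg() below consume them (nr_valid_peri, peri, mcbuf_sz; per-peripheral rqtype, peri_id, fifo_addr, burst_sz); the authoritative layout lives in <linux/amba/pl330.h>, which this patch does not show, so treat both the structure and the example values as assumptions.

    #include <linux/amba/pl330.h>	/* assumed location of the platdata types */

    /* Hypothetical board file; addresses and sizes are illustrative only. */
    static struct dma_pl330_peri my_pl330_peri[] = {
    	{ .peri_id = 0, .rqtype = MEMTOMEM },		/* memcpy channel */
    	{ .peri_id = 1, .rqtype = MEMTODEV,		/* TX toward a device FIFO */
    	  .fifo_addr = 0x13800020, .burst_sz = 2 },	/* brst_size is a log2: 1 << 2 = 4-byte bursts */
    };

    static struct dma_pl330_platdata my_pl330_pdata = {
    	.nr_valid_peri	= ARRAY_SIZE(my_pl330_peri),
    	.peri		= my_pl330_peri,
    	.mcbuf_sz	= 512,	/* MC buffer hint; see the 256-byte note in __pl330_prep_dma_memcpy() */
    };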
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 20881426c1ac..0fe5ebbfda5d 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
| @@ -22,3 +22,4 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o | |||
| 22 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 22 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
| 23 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 23 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
| 24 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 24 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
| 25 | obj-$(CONFIG_PL330_DMA) += pl330.o | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c new file mode 100644 index 000000000000..7c50f6dfd3f4 --- /dev/null +++ b/drivers/dma/pl330.c | |||
| @@ -0,0 +1,866 @@ | |||
| 1 | /* linux/drivers/dma/pl330.c | ||
| 2 | * | ||
| 3 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. | ||
| 4 | * Jaswinder Singh <jassi.brar@samsung.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/io.h> | ||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/slab.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/dmaengine.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/amba/bus.h> | ||
| 19 | #include <linux/amba/pl330.h> | ||
| 20 | |||
| 21 | #define NR_DEFAULT_DESC 16 | ||
| 22 | |||
| 23 | enum desc_status { | ||
| 24 | /* In the DMAC pool */ | ||
| 25 | FREE, | ||
| 26 | /* | ||
| 27 | * Allocated to some channel during prep_xxx | ||
| 28 | * Also may be sitting on the work_list. | ||
| 29 | */ | ||
| 30 | PREP, | ||
| 31 | /* | ||
| 32 | * Sitting on the work_list and already submitted | ||
| 33 | * to the PL330 core. Not more than two descriptors | ||
| 34 | * of a channel can be BUSY at any time. | ||
| 35 | */ | ||
| 36 | BUSY, | ||
| 37 | /* | ||
| 38 | * Sitting on the channel work_list but xfer done | ||
| 39 | * by PL330 core | ||
| 40 | */ | ||
| 41 | DONE, | ||
| 42 | }; | ||
| 43 | |||
| 44 | struct dma_pl330_chan { | ||
| 45 | /* Schedule desc completion */ | ||
| 46 | struct tasklet_struct task; | ||
| 47 | |||
| 48 | /* DMA-Engine Channel */ | ||
| 49 | struct dma_chan chan; | ||
| 50 | |||
| 51 | /* Last completed cookie */ | ||
| 52 | dma_cookie_t completed; | ||
| 53 | |||
| 54 | /* List of to be xfered descriptors */ | ||
| 55 | struct list_head work_list; | ||
| 56 | |||
| 57 | /* Pointer to the DMAC that manages this channel, | ||
| 58 | * NULL if the channel is available to be acquired. | ||
| 59 | * As the parent, this DMAC also provides descriptors | ||
| 60 | * to the channel. | ||
| 61 | */ | ||
| 62 | struct dma_pl330_dmac *dmac; | ||
| 63 | |||
| 64 | /* To protect channel manipulation */ | ||
| 65 | spinlock_t lock; | ||
| 66 | |||
| 67 | /* Token of a hardware channel thread of PL330 DMAC | ||
| 68 | * NULL if the channel is available to be acquired. | ||
| 69 | */ | ||
| 70 | void *pl330_chid; | ||
| 71 | }; | ||
| 72 | |||
| 73 | struct dma_pl330_dmac { | ||
| 74 | struct pl330_info pif; | ||
| 75 | |||
| 76 | /* DMA-Engine Device */ | ||
| 77 | struct dma_device ddma; | ||
| 78 | |||
| 79 | /* Pool of descriptors available for the DMAC's channels */ | ||
| 80 | struct list_head desc_pool; | ||
| 81 | /* To protect desc_pool manipulation */ | ||
| 82 | spinlock_t pool_lock; | ||
| 83 | |||
| 84 | /* Peripheral channels connected to this DMAC */ | ||
| 85 | struct dma_pl330_chan peripherals[0]; /* keep at end */ | ||
| 86 | }; | ||
| 87 | |||
| 88 | struct dma_pl330_desc { | ||
| 89 | /* To attach to a queue as child */ | ||
| 90 | struct list_head node; | ||
| 91 | |||
| 92 | /* Descriptor for the DMA Engine API */ | ||
| 93 | struct dma_async_tx_descriptor txd; | ||
| 94 | |||
| 95 | /* Xfer for PL330 core */ | ||
| 96 | struct pl330_xfer px; | ||
| 97 | |||
| 98 | struct pl330_reqcfg rqcfg; | ||
| 99 | struct pl330_req req; | ||
| 100 | |||
| 101 | enum desc_status status; | ||
| 102 | |||
| 103 | /* The channel which currently holds this desc */ | ||
| 104 | struct dma_pl330_chan *pchan; | ||
| 105 | }; | ||
| 106 | |||
| 107 | static inline struct dma_pl330_chan * | ||
| 108 | to_pchan(struct dma_chan *ch) | ||
| 109 | { | ||
| 110 | if (!ch) | ||
| 111 | return NULL; | ||
| 112 | |||
| 113 | return container_of(ch, struct dma_pl330_chan, chan); | ||
| 114 | } | ||
| 115 | |||
| 116 | static inline struct dma_pl330_desc * | ||
| 117 | to_desc(struct dma_async_tx_descriptor *tx) | ||
| 118 | { | ||
| 119 | return container_of(tx, struct dma_pl330_desc, txd); | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void free_desc_list(struct list_head *list) | ||
| 123 | { | ||
| 124 | struct dma_pl330_dmac *pdmac; | ||
| 125 | struct dma_pl330_desc *desc; | ||
| 126 | struct dma_pl330_chan *pch; | ||
| 127 | unsigned long flags; | ||
| 128 | |||
| 129 | if (list_empty(list)) | ||
| 130 | return; | ||
| 131 | |||
| 132 | /* Finish off the work list */ | ||
| 133 | list_for_each_entry(desc, list, node) { | ||
| 134 | dma_async_tx_callback callback; | ||
| 135 | void *param; | ||
| 136 | |||
| 137 | /* All desc in a list belong to same channel */ | ||
| 138 | pch = desc->pchan; | ||
| 139 | callback = desc->txd.callback; | ||
| 140 | param = desc->txd.callback_param; | ||
| 141 | |||
| 142 | if (callback) | ||
| 143 | callback(param); | ||
| 144 | |||
| 145 | desc->pchan = NULL; | ||
| 146 | } | ||
| 147 | |||
| 148 | pdmac = pch->dmac; | ||
| 149 | |||
| 150 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
| 151 | list_splice_tail_init(list, &pdmac->desc_pool); | ||
| 152 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
| 153 | } | ||
| 154 | |||
| 155 | static inline void fill_queue(struct dma_pl330_chan *pch) | ||
| 156 | { | ||
| 157 | struct dma_pl330_desc *desc; | ||
| 158 | int ret; | ||
| 159 | |||
| 160 | list_for_each_entry(desc, &pch->work_list, node) { | ||
| 161 | |||
| 162 | /* If already submitted */ | ||
| 163 | if (desc->status == BUSY) | ||
| 164 | break; | ||
| 165 | |||
| 166 | ret = pl330_submit_req(pch->pl330_chid, | ||
| 167 | &desc->req); | ||
| 168 | if (!ret) { | ||
| 169 | desc->status = BUSY; | ||
| 170 | break; | ||
| 171 | } else if (ret == -EAGAIN) { | ||
| 172 | /* QFull or DMAC Dying */ | ||
| 173 | break; | ||
| 174 | } else { | ||
| 175 | /* Unacceptable request */ | ||
| 176 | desc->status = DONE; | ||
| 177 | dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", | ||
| 178 | __func__, __LINE__, desc->txd.cookie); | ||
| 179 | tasklet_schedule(&pch->task); | ||
| 180 | } | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | static void pl330_tasklet(unsigned long data) | ||
| 185 | { | ||
| 186 | struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; | ||
| 187 | struct dma_pl330_desc *desc, *_dt; | ||
| 188 | unsigned long flags; | ||
| 189 | LIST_HEAD(list); | ||
| 190 | |||
| 191 | spin_lock_irqsave(&pch->lock, flags); | ||
| 192 | |||
| 193 | /* Pick up ripe tomatoes */ | ||
| 194 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) | ||
| 195 | if (desc->status == DONE) { | ||
| 196 | pch->completed = desc->txd.cookie; | ||
| 197 | list_move_tail(&desc->node, &list); | ||
| 198 | } | ||
| 199 | |||
| 200 | /* Try to submit a req imm. next to the last completed cookie */ | ||
| 201 | fill_queue(pch); | ||
| 202 | |||
| 203 | /* Make sure the PL330 Channel thread is active */ | ||
| 204 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); | ||
| 205 | |||
| 206 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 207 | |||
| 208 | free_desc_list(&list); | ||
| 209 | } | ||
| 210 | |||
| 211 | static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | ||
| 212 | { | ||
| 213 | struct dma_pl330_desc *desc = token; | ||
| 214 | struct dma_pl330_chan *pch = desc->pchan; | ||
| 215 | unsigned long flags; | ||
| 216 | |||
| 217 | /* If desc aborted */ | ||
| 218 | if (!pch) | ||
| 219 | return; | ||
| 220 | |||
| 221 | spin_lock_irqsave(&pch->lock, flags); | ||
| 222 | |||
| 223 | desc->status = DONE; | ||
| 224 | |||
| 225 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 226 | |||
| 227 | tasklet_schedule(&pch->task); | ||
| 228 | } | ||
| 229 | |||
| 230 | static int pl330_alloc_chan_resources(struct dma_chan *chan) | ||
| 231 | { | ||
| 232 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 233 | struct dma_pl330_dmac *pdmac = pch->dmac; | ||
| 234 | unsigned long flags; | ||
| 235 | |||
| 236 | spin_lock_irqsave(&pch->lock, flags); | ||
| 237 | |||
| 238 | pch->completed = chan->cookie = 1; | ||
| 239 | |||
| 240 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | ||
| 241 | if (!pch->pl330_chid) { | ||
| 242 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 243 | return 0; | ||
| 244 | } | ||
| 245 | |||
| 246 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); | ||
| 247 | |||
| 248 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 249 | |||
| 250 | return 1; | ||
| 251 | } | ||
| 252 | |||
| 253 | static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) | ||
| 254 | { | ||
| 255 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 256 | struct dma_pl330_desc *desc; | ||
| 257 | unsigned long flags; | ||
| 258 | |||
| 259 | /* Only supports DMA_TERMINATE_ALL */ | ||
| 260 | if (cmd != DMA_TERMINATE_ALL) | ||
| 261 | return -ENXIO; | ||
| 262 | |||
| 263 | spin_lock_irqsave(&pch->lock, flags); | ||
| 264 | |||
| 265 | /* FLUSH the PL330 Channel thread */ | ||
| 266 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); | ||
| 267 | |||
| 268 | /* Mark all desc done */ | ||
| 269 | list_for_each_entry(desc, &pch->work_list, node) | ||
| 270 | desc->status = DONE; | ||
| 271 | |||
| 272 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 273 | |||
| 274 | pl330_tasklet((unsigned long) pch); | ||
| 275 | |||
| 276 | return 0; | ||
| 277 | } | ||
| 278 | |||
| 279 | static void pl330_free_chan_resources(struct dma_chan *chan) | ||
| 280 | { | ||
| 281 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 282 | unsigned long flags; | ||
| 283 | |||
| 284 | spin_lock_irqsave(&pch->lock, flags); | ||
| 285 | |||
| 286 | tasklet_kill(&pch->task); | ||
| 287 | |||
| 288 | pl330_release_channel(pch->pl330_chid); | ||
| 289 | pch->pl330_chid = NULL; | ||
| 290 | |||
| 291 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 292 | } | ||
| 293 | |||
| 294 | static enum dma_status | ||
| 295 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | ||
| 296 | struct dma_tx_state *txstate) | ||
| 297 | { | ||
| 298 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 299 | dma_cookie_t last_done, last_used; | ||
| 300 | int ret; | ||
| 301 | |||
| 302 | last_done = pch->completed; | ||
| 303 | last_used = chan->cookie; | ||
| 304 | |||
| 305 | ret = dma_async_is_complete(cookie, last_done, last_used); | ||
| 306 | |||
| 307 | dma_set_tx_state(txstate, last_done, last_used, 0); | ||
| 308 | |||
| 309 | return ret; | ||
| 310 | } | ||
| 311 | |||
| 312 | static void pl330_issue_pending(struct dma_chan *chan) | ||
| 313 | { | ||
| 314 | pl330_tasklet((unsigned long) to_pchan(chan)); | ||
| 315 | } | ||
| 316 | |||
| 317 | /* | ||
| 318 | * We returned the last one of the circular list of descriptor(s) | ||
| 319 | * from prep_xxx, so the argument to submit corresponds to the last | ||
| 320 | * descriptor of the list. | ||
| 321 | */ | ||
| 322 | static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 323 | { | ||
| 324 | struct dma_pl330_desc *desc, *last = to_desc(tx); | ||
| 325 | struct dma_pl330_chan *pch = to_pchan(tx->chan); | ||
| 326 | dma_cookie_t cookie; | ||
| 327 | unsigned long flags; | ||
| 328 | |||
| 329 | spin_lock_irqsave(&pch->lock, flags); | ||
| 330 | |||
| 331 | /* Assign cookies to all nodes */ | ||
| 332 | cookie = tx->chan->cookie; | ||
| 333 | |||
| 334 | while (!list_empty(&last->node)) { | ||
| 335 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | ||
| 336 | |||
| 337 | if (++cookie < 0) | ||
| 338 | cookie = 1; | ||
| 339 | desc->txd.cookie = cookie; | ||
| 340 | |||
| 341 | list_move_tail(&desc->node, &pch->work_list); | ||
| 342 | } | ||
| 343 | |||
| 344 | if (++cookie < 0) | ||
| 345 | cookie = 1; | ||
| 346 | last->txd.cookie = cookie; | ||
| 347 | |||
| 348 | list_add_tail(&last->node, &pch->work_list); | ||
| 349 | |||
| 350 | tx->chan->cookie = cookie; | ||
| 351 | |||
| 352 | spin_unlock_irqrestore(&pch->lock, flags); | ||
| 353 | |||
| 354 | return cookie; | ||
| 355 | } | ||
| 356 | |||
| 357 | static inline void _init_desc(struct dma_pl330_desc *desc) | ||
| 358 | { | ||
| 359 | desc->pchan = NULL; | ||
| 360 | desc->req.x = &desc->px; | ||
| 361 | desc->req.token = desc; | ||
| 362 | desc->rqcfg.swap = SWAP_NO; | ||
| 363 | desc->rqcfg.privileged = 0; | ||
| 364 | desc->rqcfg.insnaccess = 0; | ||
| 365 | desc->rqcfg.scctl = SCCTRL0; | ||
| 366 | desc->rqcfg.dcctl = DCCTRL0; | ||
| 367 | desc->req.cfg = &desc->rqcfg; | ||
| 368 | desc->req.xfer_cb = dma_pl330_rqcb; | ||
| 369 | desc->txd.tx_submit = pl330_tx_submit; | ||
| 370 | |||
| 371 | INIT_LIST_HEAD(&desc->node); | ||
| 372 | } | ||
| 373 | |||
| 374 | /* Returns the number of descriptors added to the DMAC pool */ | ||
| 375 | int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) | ||
| 376 | { | ||
| 377 | struct dma_pl330_desc *desc; | ||
| 378 | unsigned long flags; | ||
| 379 | int i; | ||
| 380 | |||
| 381 | if (!pdmac) | ||
| 382 | return 0; | ||
| 383 | |||
| 384 | desc = kmalloc(count * sizeof(*desc), flg); | ||
| 385 | if (!desc) | ||
| 386 | return 0; | ||
| 387 | |||
| 388 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
| 389 | |||
| 390 | for (i = 0; i < count; i++) { | ||
| 391 | _init_desc(&desc[i]); | ||
| 392 | list_add_tail(&desc[i].node, &pdmac->desc_pool); | ||
| 393 | } | ||
| 394 | |||
| 395 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
| 396 | |||
| 397 | return count; | ||
| 398 | } | ||
| 399 | |||
| 400 | static struct dma_pl330_desc * | ||
| 401 | pluck_desc(struct dma_pl330_dmac *pdmac) | ||
| 402 | { | ||
| 403 | struct dma_pl330_desc *desc = NULL; | ||
| 404 | unsigned long flags; | ||
| 405 | |||
| 406 | if (!pdmac) | ||
| 407 | return NULL; | ||
| 408 | |||
| 409 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
| 410 | |||
| 411 | if (!list_empty(&pdmac->desc_pool)) { | ||
| 412 | desc = list_entry(pdmac->desc_pool.next, | ||
| 413 | struct dma_pl330_desc, node); | ||
| 414 | |||
| 415 | list_del_init(&desc->node); | ||
| 416 | |||
| 417 | desc->status = PREP; | ||
| 418 | desc->txd.callback = NULL; | ||
| 419 | } | ||
| 420 | |||
| 421 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
| 422 | |||
| 423 | return desc; | ||
| 424 | } | ||
| 425 | |||
| 426 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | ||
| 427 | { | ||
| 428 | struct dma_pl330_dmac *pdmac = pch->dmac; | ||
| 429 | struct dma_pl330_peri *peri = pch->chan.private; | ||
| 430 | struct dma_pl330_desc *desc; | ||
| 431 | |||
| 432 | /* Pluck one desc from the pool of DMAC */ | ||
| 433 | desc = pluck_desc(pdmac); | ||
| 434 | |||
| 435 | /* If the DMAC pool is empty, alloc new */ | ||
| 436 | if (!desc) { | ||
| 437 | if (!add_desc(pdmac, GFP_ATOMIC, 1)) | ||
| 438 | return NULL; | ||
| 439 | |||
| 440 | /* Try again */ | ||
| 441 | desc = pluck_desc(pdmac); | ||
| 442 | if (!desc) { | ||
| 443 | dev_err(pch->dmac->pif.dev, | ||
| 444 | "%s:%d ALERT!\n", __func__, __LINE__); | ||
| 445 | return NULL; | ||
| 446 | } | ||
| 447 | } | ||
| 448 | |||
| 449 | /* Initialize the descriptor */ | ||
| 450 | desc->pchan = pch; | ||
| 451 | desc->txd.cookie = 0; | ||
| 452 | async_tx_ack(&desc->txd); | ||
| 453 | |||
| 454 | desc->req.rqtype = peri->rqtype; | ||
| 455 | desc->req.peri = peri->peri_id; | ||
| 456 | |||
| 457 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | ||
| 458 | |||
| 459 | return desc; | ||
| 460 | } | ||
| 461 | |||
| 462 | static inline void fill_px(struct pl330_xfer *px, | ||
| 463 | dma_addr_t dst, dma_addr_t src, size_t len) | ||
| 464 | { | ||
| 465 | px->next = NULL; | ||
| 466 | px->bytes = len; | ||
| 467 | px->dst_addr = dst; | ||
| 468 | px->src_addr = src; | ||
| 469 | } | ||
| 470 | |||
| 471 | static struct dma_pl330_desc * | ||
| 472 | __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, | ||
| 473 | dma_addr_t src, size_t len) | ||
| 474 | { | ||
| 475 | struct dma_pl330_desc *desc = pl330_get_desc(pch); | ||
| 476 | |||
| 477 | if (!desc) { | ||
| 478 | dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", | ||
| 479 | __func__, __LINE__); | ||
| 480 | return NULL; | ||
| 481 | } | ||
| 482 | |||
| 483 | /* | ||
| 484 | * Ideally we should look out for reqs bigger than | ||
| 485 | * those that can be programmed with 256 bytes of | ||
| 486 | * MC buffer, but considering a req size is seldom | ||
| 487 | * going to be word-unaligned and more than 200MB, | ||
| 488 | * we take it easy. | ||
| 489 | * Also, should the limit be reached we'd rather | ||
| 490 | * have the platform increase MC buffer size than | ||
| 491 | * complicate this API driver. | ||
| 492 | */ | ||
| 493 | fill_px(&desc->px, dst, src, len); | ||
| 494 | |||
| 495 | return desc; | ||
| 496 | } | ||
| 497 | |||
| 498 | /* Call after fixing burst size */ | ||
| 499 | static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | ||
| 500 | { | ||
| 501 | struct dma_pl330_chan *pch = desc->pchan; | ||
| 502 | struct pl330_info *pi = &pch->dmac->pif; | ||
| 503 | int burst_len; | ||
| 504 | |||
| 505 | burst_len = pi->pcfg.data_bus_width / 8; | ||
| 506 | burst_len *= pi->pcfg.data_buf_dep; | ||
| 507 | burst_len >>= desc->rqcfg.brst_size; | ||
| 508 | |||
| 509 | /* src/dst_burst_len can't be more than 16 */ | ||
| 510 | if (burst_len > 16) | ||
| 511 | burst_len = 16; | ||
| 512 | |||
| 513 | while (burst_len > 1) { | ||
| 514 | if (!(len % (burst_len << desc->rqcfg.brst_size))) | ||
| 515 | break; | ||
| 516 | burst_len--; | ||
| 517 | } | ||
| 518 | |||
| 519 | return burst_len; | ||
| 520 | } | ||
| 521 | |||
| 522 | static struct dma_async_tx_descriptor * | ||
| 523 | pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | ||
| 524 | dma_addr_t src, size_t len, unsigned long flags) | ||
| 525 | { | ||
| 526 | struct dma_pl330_desc *desc; | ||
| 527 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 528 | struct dma_pl330_peri *peri = chan->private; | ||
| 529 | struct pl330_info *pi; | ||
| 530 | int burst; | ||
| 531 | |||
| 532 | if (unlikely(!pch || !len || !peri)) | ||
| 533 | return NULL; | ||
| 534 | |||
| 535 | if (peri->rqtype != MEMTOMEM) | ||
| 536 | return NULL; | ||
| 537 | |||
| 538 | pi = &pch->dmac->pif; | ||
| 539 | |||
| 540 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); | ||
| 541 | if (!desc) | ||
| 542 | return NULL; | ||
| 543 | |||
| 544 | desc->rqcfg.src_inc = 1; | ||
| 545 | desc->rqcfg.dst_inc = 1; | ||
| 546 | |||
| 547 | /* Select max possible burst size */ | ||
| 548 | burst = pi->pcfg.data_bus_width / 8; | ||
| 549 | |||
| 550 | while (burst > 1) { | ||
| 551 | if (!(len % burst)) | ||
| 552 | break; | ||
| 553 | burst /= 2; | ||
| 554 | } | ||
| 555 | |||
| 556 | desc->rqcfg.brst_size = 0; | ||
| 557 | while (burst != (1 << desc->rqcfg.brst_size)) | ||
| 558 | desc->rqcfg.brst_size++; | ||
| 559 | |||
| 560 | desc->rqcfg.brst_len = get_burst_len(desc, len); | ||
| 561 | |||
| 562 | desc->txd.flags = flags; | ||
| 563 | |||
| 564 | return &desc->txd; | ||
| 565 | } | ||
| 566 | |||
| 567 | static struct dma_async_tx_descriptor * | ||
| 568 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
| 569 | unsigned int sg_len, enum dma_data_direction direction, | ||
| 570 | unsigned long flg) | ||
| 571 | { | ||
| 572 | struct dma_pl330_desc *first, *desc = NULL; | ||
| 573 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 574 | struct dma_pl330_peri *peri = chan->private; | ||
| 575 | struct scatterlist *sg; | ||
| 576 | unsigned long flags; | ||
| 577 | int i, burst_size; | ||
| 578 | dma_addr_t addr; | ||
| 579 | |||
| 580 | if (unlikely(!pch || !sgl || !sg_len)) | ||
| 581 | return NULL; | ||
| 582 | |||
| 583 | /* Make sure the direction is consistent */ | ||
| 584 | if ((direction == DMA_TO_DEVICE && | ||
| 585 | peri->rqtype != MEMTODEV) || | ||
| 586 | (direction == DMA_FROM_DEVICE && | ||
| 587 | peri->rqtype != DEVTOMEM)) { | ||
| 588 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n", | ||
| 589 | __func__, __LINE__); | ||
| 590 | return NULL; | ||
| 591 | } | ||
| 592 | |||
| 593 | addr = peri->fifo_addr; | ||
| 594 | burst_size = peri->burst_sz; | ||
| 595 | |||
| 596 | first = NULL; | ||
| 597 | |||
| 598 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 599 | |||
| 600 | desc = pl330_get_desc(pch); | ||
| 601 | if (!desc) { | ||
| 602 | struct dma_pl330_dmac *pdmac = pch->dmac; | ||
| 603 | |||
| 604 | dev_err(pch->dmac->pif.dev, | ||
| 605 | "%s:%d Unable to fetch desc\n", | ||
| 606 | __func__, __LINE__); | ||
| 607 | if (!first) | ||
| 608 | return NULL; | ||
| 609 | |||
| 610 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
| 611 | |||
| 612 | while (!list_empty(&first->node)) { | ||
| 613 | desc = list_entry(first->node.next, | ||
| 614 | struct dma_pl330_desc, node); | ||
| 615 | list_move_tail(&desc->node, &pdmac->desc_pool); | ||
| 616 | } | ||
| 617 | |||
| 618 | list_move_tail(&first->node, &pdmac->desc_pool); | ||
| 619 | |||
| 620 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
| 621 | |||
| 622 | return NULL; | ||
| 623 | } | ||
| 624 | |||
| 625 | if (!first) | ||
| 626 | first = desc; | ||
| 627 | else | ||
| 628 | list_add_tail(&desc->node, &first->node); | ||
| 629 | |||
| 630 | if (direction == DMA_TO_DEVICE) { | ||
| 631 | desc->rqcfg.src_inc = 1; | ||
| 632 | desc->rqcfg.dst_inc = 0; | ||
| 633 | fill_px(&desc->px, | ||
| 634 | addr, sg_dma_address(sg), sg_dma_len(sg)); | ||
| 635 | } else { | ||
| 636 | desc->rqcfg.src_inc = 0; | ||
| 637 | desc->rqcfg.dst_inc = 1; | ||
| 638 | fill_px(&desc->px, | ||
| 639 | sg_dma_address(sg), addr, sg_dma_len(sg)); | ||
| 640 | } | ||
| 641 | |||
| 642 | desc->rqcfg.brst_size = burst_size; | ||
| 643 | desc->rqcfg.brst_len = 1; | ||
| 644 | } | ||
| 645 | |||
| 646 | /* Return the last desc in the chain */ | ||
| 647 | desc->txd.flags = flg; | ||
| 648 | return &desc->txd; | ||
| 649 | } | ||
| 650 | |||
| 651 | static irqreturn_t pl330_irq_handler(int irq, void *data) | ||
| 652 | { | ||
| 653 | if (pl330_update(data)) | ||
| 654 | return IRQ_HANDLED; | ||
| 655 | else | ||
| 656 | return IRQ_NONE; | ||
| 657 | } | ||
| 658 | |||
| 659 | static int __devinit | ||
| 660 | pl330_probe(struct amba_device *adev, struct amba_id *id) | ||
| 661 | { | ||
| 662 | struct dma_pl330_platdata *pdat; | ||
| 663 | struct dma_pl330_dmac *pdmac; | ||
| 664 | struct dma_pl330_chan *pch; | ||
| 665 | struct pl330_info *pi; | ||
| 666 | struct dma_device *pd; | ||
| 667 | struct resource *res; | ||
| 668 | int i, ret, irq; | ||
| 669 | |||
| 670 | pdat = adev->dev.platform_data; | ||
| 671 | |||
| 672 | if (!pdat || !pdat->nr_valid_peri) { | ||
| 673 | dev_err(&adev->dev, "platform data missing\n"); | ||
| 674 | return -ENODEV; | ||
| 675 | } | ||
| 676 | |||
| 677 | /* Allocate a new DMAC and its Channels */ | ||
| 678 | pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) | ||
| 679 | + sizeof(*pdmac), GFP_KERNEL); | ||
| 680 | if (!pdmac) { | ||
| 681 | dev_err(&adev->dev, "unable to allocate mem\n"); | ||
| 682 | return -ENOMEM; | ||
| 683 | } | ||
| 684 | |||
| 685 | pi = &pdmac->pif; | ||
| 686 | pi->dev = &adev->dev; | ||
| 687 | pi->pl330_data = NULL; | ||
| 688 | pi->mcbufsz = pdat->mcbuf_sz; | ||
| 689 | |||
| 690 | res = &adev->res; | ||
| 691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); | ||
| 692 | |||
| 693 | pi->base = ioremap(res->start, resource_size(res)); | ||
| 694 | if (!pi->base) { | ||
| 695 | ret = -ENXIO; | ||
| 696 | goto probe_err1; | ||
| 697 | } | ||
| 698 | |||
| 699 | irq = adev->irq[0]; | ||
| 700 | ret = request_irq(irq, pl330_irq_handler, 0, | ||
| 701 | dev_name(&adev->dev), pi); | ||
| 702 | if (ret) | ||
| 703 | goto probe_err2; | ||
| 704 | |||
| 705 | ret = pl330_add(pi); | ||
| 706 | if (ret) | ||
| 707 | goto probe_err3; | ||
| 708 | |||
| 709 | INIT_LIST_HEAD(&pdmac->desc_pool); | ||
| 710 | spin_lock_init(&pdmac->pool_lock); | ||
| 711 | |||
| 712 | /* Create a descriptor pool of default size */ | ||
| 713 | if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC)) | ||
| 714 | dev_warn(&adev->dev, "unable to allocate desc\n"); | ||
| 715 | |||
| 716 | pd = &pdmac->ddma; | ||
| 717 | INIT_LIST_HEAD(&pd->channels); | ||
| 718 | |||
| 719 | /* Initialize channel parameters */ | ||
| 720 | for (i = 0; i < pdat->nr_valid_peri; i++) { | ||
| 721 | struct dma_pl330_peri *peri = &pdat->peri[i]; | ||
| 722 | pch = &pdmac->peripherals[i]; | ||
| 723 | |||
| 724 | switch (peri->rqtype) { | ||
| 725 | case MEMTOMEM: | ||
| 726 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
| 727 | break; | ||
| 728 | case MEMTODEV: | ||
| 729 | case DEVTOMEM: | ||
| 730 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
| 731 | break; | ||
| 732 | default: | ||
| 733 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
| 734 | continue; | ||
| 735 | } | ||
| 736 | |||
| 737 | INIT_LIST_HEAD(&pch->work_list); | ||
| 738 | spin_lock_init(&pch->lock); | ||
| 739 | pch->pl330_chid = NULL; | ||
| 740 | pch->chan.private = peri; | ||
| 741 | pch->chan.device = pd; | ||
| 742 | pch->chan.chan_id = i; | ||
| 743 | pch->dmac = pdmac; | ||
| 744 | |||
| 745 | /* Add the channel to the DMAC list */ | ||
| 746 | pd->chancnt++; | ||
| 747 | list_add_tail(&pch->chan.device_node, &pd->channels); | ||
| 748 | } | ||
| 749 | |||
| 750 | pd->dev = &adev->dev; | ||
| 751 | |||
| 752 | pd->device_alloc_chan_resources = pl330_alloc_chan_resources; | ||
| 753 | pd->device_free_chan_resources = pl330_free_chan_resources; | ||
| 754 | pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; | ||
| 755 | pd->device_tx_status = pl330_tx_status; | ||
| 756 | pd->device_prep_slave_sg = pl330_prep_slave_sg; | ||
| 757 | pd->device_control = pl330_control; | ||
| 758 | pd->device_issue_pending = pl330_issue_pending; | ||
| 759 | |||
| 760 | ret = dma_async_device_register(pd); | ||
| 761 | if (ret) { | ||
| 762 | dev_err(&adev->dev, "unable to register DMAC\n"); | ||
| 763 | goto probe_err4; | ||
| 764 | } | ||
| 765 | |||
| 766 | amba_set_drvdata(adev, pdmac); | ||
| 767 | |||
| 768 | dev_info(&adev->dev, | ||
| 769 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); | ||
| 770 | dev_info(&adev->dev, | ||
| 771 | "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", | ||
| 772 | pi->pcfg.data_buf_dep, | ||
| 773 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, | ||
| 774 | pi->pcfg.num_peri, pi->pcfg.num_events); | ||
| 775 | |||
| 776 | return 0; | ||
| 777 | |||
| 778 | probe_err4: | ||
| 779 | pl330_del(pi); | ||
| 780 | probe_err3: | ||
| 781 | free_irq(irq, pi); | ||
| 782 | probe_err2: | ||
| 783 | iounmap(pi->base); | ||
| 784 | probe_err1: | ||
| 785 | release_mem_region(res->start, resource_size(res)); | ||
| 786 | kfree(pdmac); | ||
| 787 | |||
| 788 | return ret; | ||
| 789 | } | ||
| 790 | |||
| 791 | static int __devexit pl330_remove(struct amba_device *adev) | ||
| 792 | { | ||
| 793 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); | ||
| 794 | struct dma_pl330_chan *pch, *_p; | ||
| 795 | struct pl330_info *pi; | ||
| 796 | struct resource *res; | ||
| 797 | int irq; | ||
| 798 | |||
| 799 | if (!pdmac) | ||
| 800 | return 0; | ||
| 801 | |||
| 802 | amba_set_drvdata(adev, NULL); | ||
| 803 | |||
| 804 | /* Idle the DMAC */ | ||
| 805 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | ||
| 806 | chan.device_node) { | ||
| 807 | |||
| 808 | /* Remove the channel */ | ||
| 809 | list_del(&pch->chan.device_node); | ||
| 810 | |||
| 811 | /* Flush the channel */ | ||
| 812 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | ||
| 813 | pl330_free_chan_resources(&pch->chan); | ||
| 814 | } | ||
| 815 | |||
| 816 | pi = &pdmac->pif; | ||
| 817 | |||
| 818 | pl330_del(pi); | ||
| 819 | |||
| 820 | irq = adev->irq[0]; | ||
| 821 | free_irq(irq, pi); | ||
| 822 | |||
| 823 | iounmap(pi->base); | ||
| 824 | |||
| 825 | res = &adev->res; | ||
| 826 | release_mem_region(res->start, resource_size(res)); | ||
| 827 | |||
| 828 | kfree(pdmac); | ||
| 829 | |||
| 830 | return 0; | ||
| 831 | } | ||
| 832 | |||
| 833 | static struct amba_id pl330_ids[] = { | ||
| 834 | { | ||
| 835 | .id = 0x00041330, | ||
| 836 | .mask = 0x000fffff, | ||
| 837 | }, | ||
| 838 | { 0, 0 }, | ||
| 839 | }; | ||
| 840 | |||
| 841 | static struct amba_driver pl330_driver = { | ||
| 842 | .drv = { | ||
| 843 | .owner = THIS_MODULE, | ||
| 844 | .name = "dma-pl330", | ||
| 845 | }, | ||
| 846 | .id_table = pl330_ids, | ||
| 847 | .probe = pl330_probe, | ||
| 848 | .remove = pl330_remove, | ||
| 849 | }; | ||
| 850 | |||
| 851 | static int __init pl330_init(void) | ||
| 852 | { | ||
| 853 | return amba_driver_register(&pl330_driver); | ||
| 854 | } | ||
| 855 | module_init(pl330_init); | ||
| 856 | |||
| 857 | static void __exit pl330_exit(void) | ||
| 858 | { | ||
| 859 | amba_driver_unregister(&pl330_driver); | ||
| 860 | return; | ||
| 861 | } | ||
| 862 | module_exit(pl330_exit); | ||
| 863 | |||
| 864 | MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); | ||
| 865 | MODULE_DESCRIPTION("API Driver for PL330 DMAC"); | ||
| 866 | MODULE_LICENSE("GPL"); | ||
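pl330_probe() above registers the standard dmaengine callbacks (device_prep_dma_memcpy, device_issue_pending, device_tx_status, device_control). A minimal client-side sketch that drives them through the raw dma_device ops shown in the probe, since wrapper helpers vary between kernel versions; my_memcpy and my_done_cb are hypothetical, and error handling is trimmed.

    #include <linux/kernel.h>
    #include <linux/dmaengine.h>

    static void my_done_cb(void *param)
    {
    	pr_info("pl330 memcpy complete\n");
    }

    static void my_memcpy(struct dma_chan *chan, dma_addr_t dst,
    		      dma_addr_t src, size_t len)
    {
    	struct dma_device *dev = chan->device;
    	struct dma_async_tx_descriptor *tx;
    	dma_cookie_t cookie;

    	tx = dev->device_prep_dma_memcpy(chan, dst, src, len,
    					 DMA_PREP_INTERRUPT);
    	if (!tx)
    		return;

    	tx->callback = my_done_cb;
    	tx->callback_param = NULL;

    	cookie = tx->tx_submit(tx);		/* lands in pl330_tx_submit() */
    	dev->device_issue_pending(chan);	/* kicks pl330_tasklet() */

    	/* progress can be polled via dev->device_tx_status(chan, cookie, NULL) */
    }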
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index cc5316dcf580..b3ba44c0a818 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
| @@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
| 900 | flags |= RADEON_FRONT; | 900 | flags |= RADEON_FRONT; |
| 901 | } | 901 | } |
| 902 | if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { | 902 | if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { |
| 903 | if (!dev_priv->have_z_offset) | 903 | if (!dev_priv->have_z_offset) { |
| 904 | printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); | 904 | printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); |
| 905 | flags &= ~(RADEON_DEPTH | RADEON_STENCIL); | 905 | flags &= ~(RADEON_DEPTH | RADEON_STENCIL); |
| 906 | } | ||
| 906 | } | 907 | } |
| 907 | 908 | ||
| 908 | if (flags & (RADEON_FRONT | RADEON_BACK)) { | 909 | if (flags & (RADEON_FRONT | RADEON_BACK)) { |
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig index f15e90a453d1..fb5c5186d4aa 100644 --- a/drivers/idle/Kconfig +++ b/drivers/idle/Kconfig | |||
| @@ -1,3 +1,14 @@ | |||
| 1 | config INTEL_IDLE | ||
| 2 | tristate "Cpuidle Driver for Intel Processors" | ||
| 3 | depends on CPU_IDLE | ||
| 4 | depends on X86 | ||
| 5 | depends on CPU_SUP_INTEL | ||
| 6 | depends on EXPERIMENTAL | ||
| 7 | help | ||
| 8 | Enable intel_idle, a cpuidle driver that includes knowledge of | ||
| 9 | native Intel hardware idle features. The acpi_idle driver | ||
| 10 | can be configured at the same time, in order to handle | ||
| 11 | processors intel_idle does not support. | ||
| 1 | 12 | ||
| 2 | menu "Memory power savings" | 13 | menu "Memory power savings" |
| 3 | depends on X86_64 | 14 | depends on X86_64 |
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile index 5f68fc377e21..23d295cf10f2 100644 --- a/drivers/idle/Makefile +++ b/drivers/idle/Makefile | |||
| @@ -1,2 +1,3 @@ | |||
| 1 | obj-$(CONFIG_I7300_IDLE) += i7300_idle.o | 1 | obj-$(CONFIG_I7300_IDLE) += i7300_idle.o |
| 2 | obj-$(CONFIG_INTEL_IDLE) += intel_idle.o | ||
| 2 | 3 | ||
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c new file mode 100755 index 000000000000..54f0fb4cd5d2 --- /dev/null +++ b/drivers/idle/intel_idle.c | |||
| @@ -0,0 +1,461 @@ | |||
| 1 | /* | ||
| 2 | * intel_idle.c - native hardware idle loop for modern Intel processors | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010, Intel Corporation. | ||
| 5 | * Len Brown <len.brown@intel.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms and conditions of the GNU General Public License, | ||
| 9 | * version 2, as published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 14 | * more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License along with | ||
| 17 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * intel_idle is a cpuidle driver that loads on specific Intel processors | ||
| 23 | * in lieu of the legacy ACPI processor_idle driver. The intent is to | ||
| 24 | * make Linux more efficient on these processors, as intel_idle knows | ||
| 25 | * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. | ||
| 26 | */ | ||
| 27 | |||
| 28 | /* | ||
| 29 | * Design Assumptions | ||
| 30 | * | ||
| 31 | * All CPUs have same idle states as boot CPU | ||
| 32 | * | ||
| 33 | * Chipset BM_STS (bus master status) bit is a NOP | ||
| 34 | * for preventing entry into deep C-states | ||
| 35 | */ | ||
| 36 | |||
| 37 | /* | ||
| 38 | * Known limitations | ||
| 39 | * | ||
| 40 | * The driver currently initializes for_each_online_cpu() upon modprobe. | ||
| 41 | * It is unaware of subsequent processors hot-added to the system. | ||
| 42 | * This means that if you boot with maxcpus=n and later online | ||
| 43 | * processors above n, those processors will use C1 only. | ||
| 44 | * | ||
| 45 | * ACPI has a .suspend hack to turn off deep c-states during suspend | ||
| 46 | * to avoid complications with the lapic timer workaround. | ||
| 47 | * Have not seen issues with suspend, but may need same workaround here. | ||
| 48 | * | ||
| 49 | * There is currently no kernel-based automatic probing/loading mechanism | ||
| 50 | * if the driver is built as a module. | ||
| 51 | */ | ||
| 52 | |||
| 53 | /* un-comment DEBUG to enable pr_debug() statements */ | ||
| 54 | #define DEBUG | ||
| 55 | |||
| 56 | #include <linux/kernel.h> | ||
| 57 | #include <linux/cpuidle.h> | ||
| 58 | #include <linux/clockchips.h> | ||
| 59 | #include <linux/hrtimer.h> /* ktime_get_real() */ | ||
| 60 | #include <trace/events/power.h> | ||
| 61 | #include <linux/sched.h> | ||
| 62 | |||
| 63 | #define INTEL_IDLE_VERSION "0.4" | ||
| 64 | #define PREFIX "intel_idle: " | ||
| 65 | |||
| 66 | #define MWAIT_SUBSTATE_MASK (0xf) | ||
| 67 | #define MWAIT_CSTATE_MASK (0xf) | ||
| 68 | #define MWAIT_SUBSTATE_SIZE (4) | ||
| 69 | #define MWAIT_MAX_NUM_CSTATES 8 | ||
| 70 | #define CPUID_MWAIT_LEAF (5) | ||
| 71 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) | ||
| 72 | #define CPUID5_ECX_INTERRUPT_BREAK (0x2) | ||
| 73 | |||
| 74 | static struct cpuidle_driver intel_idle_driver = { | ||
| 75 | .name = "intel_idle", | ||
| 76 | .owner = THIS_MODULE, | ||
| 77 | }; | ||
| 78 | /* intel_idle.max_cstate=0 disables driver */ | ||
| 79 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; | ||
| 80 | static int power_policy = 7; /* 0 = max perf; 15 = max powersave */ | ||
| 81 | |||
| 82 | static unsigned int substates; | ||
| 83 | static int (*choose_substate)(int); | ||
| 84 | |||
| 85 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ | ||
| 86 | static unsigned int lapic_timer_reliable_states; | ||
| 87 | |||
| 88 | static struct cpuidle_device *intel_idle_cpuidle_devices; | ||
| 89 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | ||
| 90 | |||
| 91 | static struct cpuidle_state *cpuidle_state_table; | ||
| 92 | |||
| 93 | /* | ||
| 94 | * States are indexed by the cstate number, | ||
| 95 | * which is also the index into the MWAIT hint array. | ||
| 96 | * Thus C0 is a dummy. | ||
| 97 | */ | ||
| 98 | static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | ||
| 99 | { /* MWAIT C0 */ }, | ||
| 100 | { /* MWAIT C1 */ | ||
| 101 | .name = "NHM-C1", | ||
| 102 | .desc = "MWAIT 0x00", | ||
| 103 | .driver_data = (void *) 0x00, | ||
| 104 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 105 | .exit_latency = 3, | ||
| 106 | .power_usage = 1000, | ||
| 107 | .target_residency = 6, | ||
| 108 | .enter = &intel_idle }, | ||
| 109 | { /* MWAIT C2 */ | ||
| 110 | .name = "NHM-C3", | ||
| 111 | .desc = "MWAIT 0x10", | ||
| 112 | .driver_data = (void *) 0x10, | ||
| 113 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 114 | .exit_latency = 20, | ||
| 115 | .power_usage = 500, | ||
| 116 | .target_residency = 80, | ||
| 117 | .enter = &intel_idle }, | ||
| 118 | { /* MWAIT C3 */ | ||
| 119 | .name = "NHM-C6", | ||
| 120 | .desc = "MWAIT 0x20", | ||
| 121 | .driver_data = (void *) 0x20, | ||
| 122 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 123 | .exit_latency = 200, | ||
| 124 | .power_usage = 350, | ||
| 125 | .target_residency = 800, | ||
| 126 | .enter = &intel_idle }, | ||
| 127 | }; | ||
| 128 | |||
| 129 | static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | ||
| 130 | { /* MWAIT C0 */ }, | ||
| 131 | { /* MWAIT C1 */ | ||
| 132 | .name = "ATM-C1", | ||
| 133 | .desc = "MWAIT 0x00", | ||
| 134 | .driver_data = (void *) 0x00, | ||
| 135 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 136 | .exit_latency = 1, | ||
| 137 | .power_usage = 1000, | ||
| 138 | .target_residency = 4, | ||
| 139 | .enter = &intel_idle }, | ||
| 140 | { /* MWAIT C2 */ | ||
| 141 | .name = "ATM-C2", | ||
| 142 | .desc = "MWAIT 0x10", | ||
| 143 | .driver_data = (void *) 0x10, | ||
| 144 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 145 | .exit_latency = 20, | ||
| 146 | .power_usage = 500, | ||
| 147 | .target_residency = 80, | ||
| 148 | .enter = &intel_idle }, | ||
| 149 | { /* MWAIT C3 */ }, | ||
| 150 | { /* MWAIT C4 */ | ||
| 151 | .name = "ATM-C4", | ||
| 152 | .desc = "MWAIT 0x30", | ||
| 153 | .driver_data = (void *) 0x30, | ||
| 154 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 155 | .exit_latency = 100, | ||
| 156 | .power_usage = 250, | ||
| 157 | .target_residency = 400, | ||
| 158 | .enter = &intel_idle }, | ||
| 159 | { /* MWAIT C5 */ }, | ||
| 160 | { /* MWAIT C6 */ | ||
| 161 | .name = "ATM-C6", | ||
| 162 | .desc = "MWAIT 0x40", | ||
| 163 | .driver_data = (void *) 0x40, | ||
| 164 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
| 165 | .exit_latency = 200, | ||
| 166 | .power_usage = 150, | ||
| 167 | .target_residency = 800, | ||
| 168 | .enter = NULL }, /* disabled */ | ||
| 169 | }; | ||
| 170 | |||
| 171 | /* | ||
| 172 | * choose_tunable_substate() | ||
| 173 | * | ||
| 174 | * Run-time decision on which C-state substate to invoke | ||
| 175 | * If power_policy = 0, choose shallowest substate (0) | ||
| 176 | * If power_policy = 15, choose deepest substate | ||
| 177 | * If power_policy = middle, choose middle substate etc. | ||
| 178 | */ | ||
| 179 | static int choose_tunable_substate(int cstate) | ||
| 180 | { | ||
| 181 | unsigned int num_substates; | ||
| 182 | unsigned int substate_choice; | ||
| 183 | |||
| 184 | power_policy &= 0xF; /* valid range: 0-15 */ | ||
| 185 | cstate &= 7; /* valid range: 0-7 */ | ||
| 186 | |||
| 187 | num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; | ||
| 188 | |||
| 189 | if (num_substates <= 1) | ||
| 190 | return 0; | ||
| 191 | |||
| 192 | substate_choice = ((power_policy + (power_policy + 1) * | ||
| 193 | (num_substates - 1)) / 16); | ||
| 194 | |||
| 195 | return substate_choice; | ||
| 196 | } | ||
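A worked example of the sub-state interpolation above (illustrative numbers, not taken from the patch): with the default power_policy of 7 and a C-state advertising num_substates = 4, substate_choice = (7 + 8 * 3) / 16 = 1, i.e. the second-shallowest sub-state; power_policy = 0 always yields 0, and power_policy = 15 yields num_substates - 1, the deepest.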
| 197 | |||
| 198 | /* | ||
| 199 | * choose_zero_substate() | ||
| 200 | */ | ||
| 201 | static int choose_zero_substate(int cstate) | ||
| 202 | { | ||
| 203 | return 0; | ||
| 204 | } | ||
| 205 | |||
| 206 | /** | ||
| 207 | * intel_idle | ||
| 208 | * @dev: cpuidle_device | ||
| 209 | * @state: cpuidle state | ||
| 210 | * | ||
| 211 | */ | ||
| 212 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | ||
| 213 | { | ||
| 214 | unsigned long ecx = 1; /* break on interrupt flag */ | ||
| 215 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state); | ||
| 216 | unsigned int cstate; | ||
| 217 | ktime_t kt_before, kt_after; | ||
| 218 | s64 usec_delta; | ||
| 219 | int cpu = smp_processor_id(); | ||
| 220 | |||
| 221 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; | ||
| 222 | |||
| 223 | eax = eax + (choose_substate)(cstate); | ||
| 224 | |||
| 225 | local_irq_disable(); | ||
| 226 | |||
| 227 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | ||
| 228 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | ||
| 229 | |||
| 230 | kt_before = ktime_get_real(); | ||
| 231 | |||
| 232 | stop_critical_timings(); | ||
| 233 | #ifndef MODULE | ||
| 234 | trace_power_start(POWER_CSTATE, (eax >> 4) + 1); | ||
| 235 | #endif | ||
| 236 | if (!need_resched()) { | ||
| 237 | |||
| 238 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
| 239 | smp_mb(); | ||
| 240 | if (!need_resched()) | ||
| 241 | __mwait(eax, ecx); | ||
| 242 | } | ||
| 243 | |||
| 244 | start_critical_timings(); | ||
| 245 | |||
| 246 | kt_after = ktime_get_real(); | ||
| 247 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); | ||
| 248 | |||
| 249 | local_irq_enable(); | ||
| 250 | |||
| 251 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | ||
| 252 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | ||
| 253 | |||
| 254 | return usec_delta; | ||
| 255 | } | ||
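To make the hint decode concrete: MWAIT hint 0x00 gives cstate = ((0x00 >> 4) & MWAIT_CSTATE_MASK) + 1 = 1 (C1), and hint 0x20 gives cstate = 3; that value selects the bit tested against lapic_timer_reliable_states to decide whether the broadcast clockevent must stand in for the LAPIC timer. The second need_resched() check between __monitor() and __mwait() is what keeps a wakeup that races with idle entry from being lost.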
| 256 | |||
| 257 | /* | ||
| 258 | * intel_idle_probe() | ||
| 259 | */ | ||
| 260 | static int intel_idle_probe(void) | ||
| 261 | { | ||
| 262 | unsigned int eax, ebx, ecx, edx; | ||
| 263 | |||
| 264 | if (max_cstate == 0) { | ||
| 265 | pr_debug(PREFIX "disabled\n"); | ||
| 266 | return -EPERM; | ||
| 267 | } | ||
| 268 | |||
| 269 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
| 270 | return -ENODEV; | ||
| 271 | |||
| 272 | if (!boot_cpu_has(X86_FEATURE_MWAIT)) | ||
| 273 | return -ENODEV; | ||
| 274 | |||
| 275 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | ||
| 276 | return -ENODEV; | ||
| 277 | |||
| 278 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | ||
| 279 | |||
| 280 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | ||
| 281 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) | ||
| 282 | return -ENODEV; | ||
| 283 | #ifdef DEBUG | ||
| 284 | if (substates == 0) /* can over-ride via modparam */ | ||
| 285 | #endif | ||
| 286 | substates = edx; | ||
| 287 | |||
| 288 | pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates); | ||
| 289 | |||
| 290 | if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ | ||
| 291 | lapic_timer_reliable_states = 0xFFFFFFFF; | ||
| 292 | |||
| 293 | if (boot_cpu_data.x86 != 6) /* family 6 */ | ||
| 294 | return -ENODEV; | ||
| 295 | |||
| 296 | switch (boot_cpu_data.x86_model) { | ||
| 297 | |||
| 298 | case 0x1A: /* Core i7, Xeon 5500 series */ | ||
| 299 | case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ | ||
| 300 | case 0x1F: /* Core i7 and i5 Processor - Nehalem */ | ||
| 301 | case 0x2E: /* Nehalem-EX Xeon */ | ||
| 302 | lapic_timer_reliable_states = (1 << 1); /* C1 */ | ||
| 303 | |||
| 304 | case 0x25: /* Westmere */ | ||
| 305 | case 0x2C: /* Westmere */ | ||
| 306 | cpuidle_state_table = nehalem_cstates; | ||
| 307 | choose_substate = choose_tunable_substate; | ||
| 308 | break; | ||
| 309 | |||
| 310 | case 0x1C: /* 28 - Atom Processor */ | ||
| 311 | lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ | ||
| 312 | cpuidle_state_table = atom_cstates; | ||
| 313 | choose_substate = choose_zero_substate; | ||
| 314 | break; | ||
| 315 | #ifdef FUTURE_USE | ||
| 316 | case 0x17: /* 23 - Core 2 Duo */ | ||
| 317 | lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ | ||
| 318 | #endif | ||
| 319 | |||
| 320 | default: | ||
| 321 | pr_debug(PREFIX "does not run on family %d model %d\n", | ||
| 322 | boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
| 323 | return -ENODEV; | ||
| 324 | } | ||
| 325 | |||
| 326 | pr_debug(PREFIX "v" INTEL_IDLE_VERSION | ||
| 327 | " model 0x%X\n", boot_cpu_data.x86_model); | ||
| 328 | |||
| 329 | pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", | ||
| 330 | lapic_timer_reliable_states); | ||
| 331 | return 0; | ||
| 332 | } | ||
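For example (an assumed CPUID value, not one taken from this patch): if CPUID.05H:EDX returned 0x00001120, the driver would see (edx >> 4) & 0xf = 2 sub-states for C1, (edx >> 8) & 0xf = 1 for C2 and (edx >> 12) & 0xf = 1 for C3; any C-state whose 4-bit field is zero is skipped later in intel_idle_cpuidle_devices_init(). The Nehalem cases (0x1A/0x1E/0x1F/0x2E) intentionally fall through into the Westmere cases so that all of them share nehalem_cstates.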
| 333 | |||
| 334 | /* | ||
| 335 | * intel_idle_cpuidle_devices_uninit() | ||
| 336 | * unregister, free cpuidle_devices | ||
| 337 | */ | ||
| 338 | static void intel_idle_cpuidle_devices_uninit(void) | ||
| 339 | { | ||
| 340 | int i; | ||
| 341 | struct cpuidle_device *dev; | ||
| 342 | |||
| 343 | for_each_online_cpu(i) { | ||
| 344 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); | ||
| 345 | cpuidle_unregister_device(dev); | ||
| 346 | } | ||
| 347 | |||
| 348 | free_percpu(intel_idle_cpuidle_devices); | ||
| 349 | return; | ||
| 350 | } | ||
| 351 | /* | ||
| 352 | * intel_idle_cpuidle_devices_init() | ||
| 353 | * allocate, initialize, register cpuidle_devices | ||
| 354 | */ | ||
| 355 | static int intel_idle_cpuidle_devices_init(void) | ||
| 356 | { | ||
| 357 | int i, cstate; | ||
| 358 | struct cpuidle_device *dev; | ||
| 359 | |||
| 360 | intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); | ||
| 361 | if (intel_idle_cpuidle_devices == NULL) | ||
| 362 | return -ENOMEM; | ||
| 363 | |||
| 364 | for_each_online_cpu(i) { | ||
| 365 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); | ||
| 366 | |||
| 367 | dev->state_count = 1; | ||
| 368 | |||
| 369 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | ||
| 370 | int num_substates; | ||
| 371 | |||
| 372 | if (cstate > max_cstate) { | ||
| 373 | printk(PREFIX "max_cstate %d reached\n", | ||
| 374 | max_cstate); | ||
| 375 | break; | ||
| 376 | } | ||
| 377 | |||
| 378 | /* does the state exist in CPUID.MWAIT? */ | ||
| 379 | num_substates = (substates >> ((cstate) * 4)) | ||
| 380 | & MWAIT_SUBSTATE_MASK; | ||
| 381 | if (num_substates == 0) | ||
| 382 | continue; | ||
| 383 | /* is the state not enabled? */ | ||
| 384 | if (cpuidle_state_table[cstate].enter == NULL) { | ||
| 385 | /* does the driver not know about the state? */ | ||
| 386 | if (*cpuidle_state_table[cstate].name == '\0') | ||
| 387 | pr_debug(PREFIX "unaware of model 0x%x" | ||
| 388 | " MWAIT %d please" | ||
| 389 | " contact lenb@kernel.org", | ||
| 390 | boot_cpu_data.x86_model, cstate); | ||
| 391 | continue; | ||
| 392 | } | ||
| 393 | |||
| 394 | if ((cstate > 2) && | ||
| 395 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
| 396 | mark_tsc_unstable("TSC halts in idle" | ||
| 397 | " states deeper than C2"); | ||
| 398 | |||
| 399 | dev->states[dev->state_count] = /* structure copy */ | ||
| 400 | cpuidle_state_table[cstate]; | ||
| 401 | |||
| 402 | dev->state_count += 1; | ||
| 403 | } | ||
| 404 | |||
| 405 | dev->cpu = i; | ||
| 406 | if (cpuidle_register_device(dev)) { | ||
| 407 | pr_debug(PREFIX "cpuidle_register_device %d failed!\n", | ||
| 408 | i); | ||
| 409 | intel_idle_cpuidle_devices_uninit(); | ||
| 410 | return -EIO; | ||
| 411 | } | ||
| 412 | } | ||
| 413 | |||
| 414 | return 0; | ||
| 415 | } | ||
| 416 | |||
| 417 | |||
| 418 | static int __init intel_idle_init(void) | ||
| 419 | { | ||
| 420 | int retval; | ||
| 421 | |||
| 422 | retval = intel_idle_probe(); | ||
| 423 | if (retval) | ||
| 424 | return retval; | ||
| 425 | |||
| 426 | retval = cpuidle_register_driver(&intel_idle_driver); | ||
| 427 | if (retval) { | ||
| 428 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", | ||
| 429 | cpuidle_get_driver()->name); | ||
| 430 | return retval; | ||
| 431 | } | ||
| 432 | |||
| 433 | retval = intel_idle_cpuidle_devices_init(); | ||
| 434 | if (retval) { | ||
| 435 | cpuidle_unregister_driver(&intel_idle_driver); | ||
| 436 | return retval; | ||
| 437 | } | ||
| 438 | |||
| 439 | return 0; | ||
| 440 | } | ||
| 441 | |||
| 442 | static void __exit intel_idle_exit(void) | ||
| 443 | { | ||
| 444 | intel_idle_cpuidle_devices_uninit(); | ||
| 445 | cpuidle_unregister_driver(&intel_idle_driver); | ||
| 446 | |||
| 447 | return; | ||
| 448 | } | ||
| 449 | |||
| 450 | module_init(intel_idle_init); | ||
| 451 | module_exit(intel_idle_exit); | ||
| 452 | |||
| 453 | module_param(power_policy, int, 0644); | ||
| 454 | module_param(max_cstate, int, 0444); | ||
| 455 | #ifdef DEBUG | ||
| 456 | module_param(substates, int, 0444); | ||
| 457 | #endif | ||
| 458 | |||
| 459 | MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); | ||
| 460 | MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); | ||
| 461 | MODULE_LICENSE("GPL"); | ||
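Typical use, as the parameter declarations above suggest (the sysfs path is the standard module-parameter location, not something spelled out in this patch): booting with intel_idle.max_cstate=0 disables the driver entirely, while power_policy is writable (0644) and so should be tunable at run time, e.g. through /sys/module/intel_idle/parameters/power_policy, with 0 biased toward performance and 15 toward power saving.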
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 46474842cfe9..08f948df8fa9 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
| @@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len) | |||
| 706 | if (!len) | 706 | if (!len) |
| 707 | return 0; | 707 | return 0; |
| 708 | 708 | ||
| 709 | data = kmalloc(len, GFP_KERNEL); | 709 | data = memdup_user((void __user *)(unsigned long)src, len); |
| 710 | if (!data) | 710 | if (IS_ERR(data)) |
| 711 | return -ENOMEM; | 711 | return PTR_ERR(data); |
| 712 | |||
| 713 | if (copy_from_user(data, (void __user *)(unsigned long)src, len)) { | ||
| 714 | kfree(data); | ||
| 715 | return -EFAULT; | ||
| 716 | } | ||
| 717 | 712 | ||
| 718 | *dest = data; | 713 | *dest = data; |
| 719 | return 0; | 714 | return 0; |
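The hunk above folds the open-coded kmalloc()/copy_from_user() pair into memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(). A minimal stand-alone sketch of the resulting pattern (hypothetical helper, not code from the patch):

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>	/* memdup_user() */

	/* duplicate a user buffer into kernel memory, or return a negative errno */
	static int dup_user_blob(const void __user *src, size_t len, void **dest)
	{
		void *data;

		if (!len)
			return 0;
		data = memdup_user(src, len);	/* kmalloc() + copy_from_user() */
		if (IS_ERR(data))
			return PTR_ERR(data);	/* -ENOMEM or -EFAULT */
		*dest = data;			/* caller kfree()s the copy later */
		return 0;
	}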
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 755470440ef1..edef8527eb34 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
| @@ -144,10 +144,11 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf, | |||
| 144 | size_t count, loff_t *ppos) | 144 | size_t count, loff_t *ppos) |
| 145 | { | 145 | { |
| 146 | u64 *counters; | 146 | u64 *counters; |
| 147 | size_t avail; | ||
| 147 | struct qib_devdata *dd = private2dd(file); | 148 | struct qib_devdata *dd = private2dd(file); |
| 148 | 149 | ||
| 149 | return simple_read_from_buffer(buf, count, ppos, counters, | 150 | avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters); |
| 150 | dd->f_read_cntrs(dd, *ppos, NULL, &counters)); | 151 | return simple_read_from_buffer(buf, count, ppos, counters, avail); |
| 151 | } | 152 | } |
| 152 | 153 | ||
| 153 | /* read the per-device counters */ | 154 | /* read the per-device counters */ |
| @@ -155,10 +156,11 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, | |||
| 155 | size_t count, loff_t *ppos) | 156 | size_t count, loff_t *ppos) |
| 156 | { | 157 | { |
| 157 | char *names; | 158 | char *names; |
| 159 | size_t avail; | ||
| 158 | struct qib_devdata *dd = private2dd(file); | 160 | struct qib_devdata *dd = private2dd(file); |
| 159 | 161 | ||
| 160 | return simple_read_from_buffer(buf, count, ppos, names, | 162 | avail = dd->f_read_cntrs(dd, *ppos, &names, NULL); |
| 161 | dd->f_read_cntrs(dd, *ppos, &names, NULL)); | 163 | return simple_read_from_buffer(buf, count, ppos, names, avail); |
| 162 | } | 164 | } |
| 163 | 165 | ||
| 164 | static const struct file_operations cntr_ops[] = { | 166 | static const struct file_operations cntr_ops[] = { |
| @@ -176,10 +178,11 @@ static ssize_t portnames_read(struct file *file, char __user *buf, | |||
| 176 | size_t count, loff_t *ppos) | 178 | size_t count, loff_t *ppos) |
| 177 | { | 179 | { |
| 178 | char *names; | 180 | char *names; |
| 181 | size_t avail; | ||
| 179 | struct qib_devdata *dd = private2dd(file); | 182 | struct qib_devdata *dd = private2dd(file); |
| 180 | 183 | ||
| 181 | return simple_read_from_buffer(buf, count, ppos, names, | 184 | avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL); |
| 182 | dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL)); | 185 | return simple_read_from_buffer(buf, count, ppos, names, avail); |
| 183 | } | 186 | } |
| 184 | 187 | ||
| 185 | /* read the per-port counters for port 1 (pidx 0) */ | 188 | /* read the per-port counters for port 1 (pidx 0) */ |
| @@ -187,10 +190,11 @@ static ssize_t portcntrs_1_read(struct file *file, char __user *buf, | |||
| 187 | size_t count, loff_t *ppos) | 190 | size_t count, loff_t *ppos) |
| 188 | { | 191 | { |
| 189 | u64 *counters; | 192 | u64 *counters; |
| 193 | size_t avail; | ||
| 190 | struct qib_devdata *dd = private2dd(file); | 194 | struct qib_devdata *dd = private2dd(file); |
| 191 | 195 | ||
| 192 | return simple_read_from_buffer(buf, count, ppos, counters, | 196 | avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters); |
| 193 | dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters)); | 197 | return simple_read_from_buffer(buf, count, ppos, counters, avail); |
| 194 | } | 198 | } |
| 195 | 199 | ||
| 196 | /* read the per-port counters for port 2 (pidx 1) */ | 200 | /* read the per-port counters for port 2 (pidx 1) */ |
| @@ -198,10 +202,11 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf, | |||
| 198 | size_t count, loff_t *ppos) | 202 | size_t count, loff_t *ppos) |
| 199 | { | 203 | { |
| 200 | u64 *counters; | 204 | u64 *counters; |
| 205 | size_t avail; | ||
| 201 | struct qib_devdata *dd = private2dd(file); | 206 | struct qib_devdata *dd = private2dd(file); |
| 202 | 207 | ||
| 203 | return simple_read_from_buffer(buf, count, ppos, counters, | 208 | avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters); |
| 204 | dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters)); | 209 | return simple_read_from_buffer(buf, count, ppos, counters, avail); |
| 205 | } | 210 | } |
| 206 | 211 | ||
| 207 | static const struct file_operations portcntr_ops[] = { | 212 | static const struct file_operations portcntr_ops[] = { |
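The qib_fs.c hunks above all fix the same problem: the old code filled `counters`/`names` and read them inside a single argument list, and C does not order the evaluation of function arguments, so simple_read_from_buffer() could be handed the pointer before f_read_cntrs()/f_read_portcntrs() had set it. Sequencing the two calls, as the new code does, removes the ambiguity:

	avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);	/* fills counters first */
	return simple_read_from_buffer(buf, count, ppos, counters, avail);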
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 7b6549fd429b..1eadadc13da8 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
| @@ -3475,14 +3475,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, | |||
| 3475 | struct qib_devdata *dd; | 3475 | struct qib_devdata *dd; |
| 3476 | int ret; | 3476 | int ret; |
| 3477 | 3477 | ||
| 3478 | #ifndef CONFIG_PCI_MSI | ||
| 3479 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " | ||
| 3480 | "work if CONFIG_PCI_MSI is not enabled\n", | ||
| 3481 | ent->device); | ||
| 3482 | dd = ERR_PTR(-ENODEV); | ||
| 3483 | goto bail; | ||
| 3484 | #endif | ||
| 3485 | |||
| 3486 | dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) + | 3478 | dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) + |
| 3487 | sizeof(struct qib_chip_specific)); | 3479 | sizeof(struct qib_chip_specific)); |
| 3488 | if (IS_ERR(dd)) | 3480 | if (IS_ERR(dd)) |
| @@ -3554,10 +3546,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, | |||
| 3554 | if (qib_mini_init) | 3546 | if (qib_mini_init) |
| 3555 | goto bail; | 3547 | goto bail; |
| 3556 | 3548 | ||
| 3557 | #ifndef CONFIG_PCI_MSI | ||
| 3558 | qib_dev_err(dd, "PCI_MSI not configured, NO interrupts\n"); | ||
| 3559 | #endif | ||
| 3560 | |||
| 3561 | if (qib_pcie_params(dd, 8, NULL, NULL)) | 3549 | if (qib_pcie_params(dd, 8, NULL, NULL)) |
| 3562 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | 3550 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " |
| 3563 | "continuing anyway\n"); | 3551 | "continuing anyway\n"); |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 2c24eab35b54..503992d9c5ce 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -42,9 +42,6 @@ | |||
| 42 | #include <linux/jiffies.h> | 42 | #include <linux/jiffies.h> |
| 43 | #include <rdma/ib_verbs.h> | 43 | #include <rdma/ib_verbs.h> |
| 44 | #include <rdma/ib_smi.h> | 44 | #include <rdma/ib_smi.h> |
| 45 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 46 | #include <linux/dca.h> | ||
| 47 | #endif | ||
| 48 | 45 | ||
| 49 | #include "qib.h" | 46 | #include "qib.h" |
| 50 | #include "qib_7322_regs.h" | 47 | #include "qib_7322_regs.h" |
| @@ -114,40 +111,18 @@ static ushort qib_singleport; | |||
| 114 | module_param_named(singleport, qib_singleport, ushort, S_IRUGO); | 111 | module_param_named(singleport, qib_singleport, ushort, S_IRUGO); |
| 115 | MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); | 112 | MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); |
| 116 | 113 | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Setup QMH7342 receive and transmit parameters, necessary because | ||
| 120 | * each bay, Mez connector, and IB port need different tuning, beyond | ||
| 121 | * what the switch and HCA can do automatically. | ||
| 122 | * It's expected to be done by cat'ing files to the modules file, | ||
| 123 | * rather than setting up as a module parameter. | ||
| 124 | * It's a "write-only" file, returns 0 when read back. | ||
| 125 | * The unit, port, bay (if given), and values MUST be done as a single write. | ||
| 126 | * The unit, port, and bay must precede the values to be effective. | ||
| 127 | */ | ||
| 128 | static int setup_qmh_params(const char *, struct kernel_param *); | ||
| 129 | static unsigned dummy_qmh_params; | ||
| 130 | module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint, | ||
| 131 | &dummy_qmh_params, S_IWUSR | S_IRUGO); | ||
| 132 | |||
| 133 | /* similarly for QME7342, but it's simpler */ | ||
| 134 | static int setup_qme_params(const char *, struct kernel_param *); | ||
| 135 | static unsigned dummy_qme_params; | ||
| 136 | module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint, | ||
| 137 | &dummy_qme_params, S_IWUSR | S_IRUGO); | ||
| 138 | |||
| 139 | #define MAX_ATTEN_LEN 64 /* plenty for any real system */ | 114 | #define MAX_ATTEN_LEN 64 /* plenty for any real system */ |
| 140 | /* for read back, default index is ~5m copper cable */ | 115 | /* for read back, default index is ~5m copper cable */ |
| 141 | static char cable_atten_list[MAX_ATTEN_LEN] = "10"; | 116 | static char txselect_list[MAX_ATTEN_LEN] = "10"; |
| 142 | static struct kparam_string kp_cable_atten = { | 117 | static struct kparam_string kp_txselect = { |
| 143 | .string = cable_atten_list, | 118 | .string = txselect_list, |
| 144 | .maxlen = MAX_ATTEN_LEN | 119 | .maxlen = MAX_ATTEN_LEN |
| 145 | }; | 120 | }; |
| 146 | static int setup_cable_atten(const char *, struct kernel_param *); | 121 | static int setup_txselect(const char *, struct kernel_param *); |
| 147 | module_param_call(cable_atten, setup_cable_atten, param_get_string, | 122 | module_param_call(txselect, setup_txselect, param_get_string, |
| 148 | &kp_cable_atten, S_IWUSR | S_IRUGO); | 123 | &kp_txselect, S_IWUSR | S_IRUGO); |
| 149 | MODULE_PARM_DESC(cable_atten, \ | 124 | MODULE_PARM_DESC(txselect, \ |
| 150 | "cable attenuation indices for cables with invalid EEPROM"); | 125 | "Tx serdes indices (for no QSFP or invalid QSFP data)"); |
| 151 | 126 | ||
| 152 | #define BOARD_QME7342 5 | 127 | #define BOARD_QME7342 5 |
| 153 | #define BOARD_QMH7342 6 | 128 | #define BOARD_QMH7342 6 |
| @@ -540,12 +515,6 @@ struct qib_chip_specific { | |||
| 540 | u32 lastbuf_for_pio; | 515 | u32 lastbuf_for_pio; |
| 541 | u32 stay_in_freeze; | 516 | u32 stay_in_freeze; |
| 542 | u32 recovery_ports_initted; | 517 | u32 recovery_ports_initted; |
| 543 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 544 | u32 dca_ctrl; | ||
| 545 | int rhdr_cpu[18]; | ||
| 546 | int sdma_cpu[2]; | ||
| 547 | u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */ | ||
| 548 | #endif | ||
| 549 | struct msix_entry *msix_entries; | 518 | struct msix_entry *msix_entries; |
| 550 | void **msix_arg; | 519 | void **msix_arg; |
| 551 | unsigned long *sendchkenable; | 520 | unsigned long *sendchkenable; |
| @@ -574,11 +543,12 @@ struct vendor_txdds_ent { | |||
| 574 | static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | 543 | static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); |
| 575 | 544 | ||
| 576 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ | 545 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ |
| 546 | #define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */ | ||
| 577 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ | 547 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ |
| 578 | 548 | ||
| 579 | #define H1_FORCE_VAL 8 | 549 | #define H1_FORCE_VAL 8 |
| 580 | #define H1_FORCE_QME 1 /* may be overridden via setup_qme_params() */ | 550 | #define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */ |
| 581 | #define H1_FORCE_QMH 7 /* may be overridden via setup_qmh_params() */ | 551 | #define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */ |
| 582 | 552 | ||
| 583 | /* The static and dynamic registers are paired, and the pairs indexed by spd */ | 553 | /* The static and dynamic registers are paired, and the pairs indexed by spd */ |
| 584 | #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ | 554 | #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ |
| @@ -590,15 +560,6 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | |||
| 590 | #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ | 560 | #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ |
| 591 | #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ | 561 | #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ |
| 592 | 562 | ||
| 593 | static const struct txdds_ent qmh_sdr_txdds = { 11, 0, 5, 6 }; | ||
| 594 | static const struct txdds_ent qmh_ddr_txdds = { 7, 0, 2, 8 }; | ||
| 595 | static const struct txdds_ent qmh_qdr_txdds = { 0, 1, 3, 10 }; | ||
| 596 | |||
| 597 | /* this is used for unknown mez cards also */ | ||
| 598 | static const struct txdds_ent qme_sdr_txdds = { 11, 0, 4, 4 }; | ||
| 599 | static const struct txdds_ent qme_ddr_txdds = { 7, 0, 2, 7 }; | ||
| 600 | static const struct txdds_ent qme_qdr_txdds = { 0, 1, 12, 11 }; | ||
| 601 | |||
| 602 | struct qib_chippport_specific { | 563 | struct qib_chippport_specific { |
| 603 | u64 __iomem *kpregbase; | 564 | u64 __iomem *kpregbase; |
| 604 | u64 __iomem *cpregbase; | 565 | u64 __iomem *cpregbase; |
| @@ -637,12 +598,8 @@ struct qib_chippport_specific { | |||
| 637 | * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. | 598 | * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. |
| 638 | * entry zero is unused, to simplify indexing | 599 | * entry zero is unused, to simplify indexing |
| 639 | */ | 600 | */ |
| 640 | u16 h1_val; | 601 | u8 h1_val; |
| 641 | u8 amp[SERDES_CHANS]; | 602 | u8 no_eep; /* txselect table index to use if no qsfp info */ |
| 642 | u8 pre[SERDES_CHANS]; | ||
| 643 | u8 mainv[SERDES_CHANS]; | ||
| 644 | u8 post[SERDES_CHANS]; | ||
| 645 | u8 no_eep; /* attenuation index to use if no qsfp info */ | ||
| 646 | u8 ipg_tries; | 603 | u8 ipg_tries; |
| 647 | u8 ibmalfusesnap; | 604 | u8 ibmalfusesnap; |
| 648 | struct qib_qsfp_data qsfp_data; | 605 | struct qib_qsfp_data qsfp_data; |
| @@ -676,52 +633,6 @@ static struct { | |||
| 676 | SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, | 633 | SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, |
| 677 | }; | 634 | }; |
| 678 | 635 | ||
| 679 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 680 | static const struct dca_reg_map { | ||
| 681 | int shadow_inx; | ||
| 682 | int lsb; | ||
| 683 | u64 mask; | ||
| 684 | u16 regno; | ||
| 685 | } dca_rcvhdr_reg_map[] = { | ||
| 686 | { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH), | ||
| 687 | ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
| 688 | { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH), | ||
| 689 | ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
| 690 | { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH), | ||
| 691 | ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
| 692 | { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH), | ||
| 693 | ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
| 694 | { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH), | ||
| 695 | ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
| 696 | { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH), | ||
| 697 | ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
| 698 | { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH), | ||
| 699 | ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
| 700 | { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH), | ||
| 701 | ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
| 702 | { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH), | ||
| 703 | ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
| 704 | { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH), | ||
| 705 | ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
| 706 | { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH), | ||
| 707 | ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
| 708 | { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH), | ||
| 709 | ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
| 710 | { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH), | ||
| 711 | ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
| 712 | { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH), | ||
| 713 | ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
| 714 | { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH), | ||
| 715 | ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
| 716 | { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH), | ||
| 717 | ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
| 718 | { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH), | ||
| 719 | ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) }, | ||
| 720 | { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH), | ||
| 721 | ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) }, | ||
| 722 | }; | ||
| 723 | #endif | ||
| 724 | |||
| 725 | /* ibcctrl bits */ | 636 | /* ibcctrl bits */ |
| 726 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | 637 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 |
| 727 | /* cycle through TS1/TS2 till OK */ | 638 | /* cycle through TS1/TS2 till OK */ |
| @@ -2572,95 +2483,6 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) | |||
| 2572 | qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); | 2483 | qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); |
| 2573 | } | 2484 | } |
| 2574 | 2485 | ||
| 2575 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 2576 | static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd) | ||
| 2577 | { | ||
| 2578 | struct qib_devdata *dd = rcd->dd; | ||
| 2579 | struct qib_chip_specific *cspec = dd->cspec; | ||
| 2580 | int cpu = get_cpu(); | ||
| 2581 | |||
| 2582 | if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { | ||
| 2583 | const struct dca_reg_map *rmp; | ||
| 2584 | |||
| 2585 | cspec->rhdr_cpu[rcd->ctxt] = cpu; | ||
| 2586 | rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; | ||
| 2587 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; | ||
| 2588 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= | ||
| 2589 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; | ||
| 2590 | qib_write_kreg(dd, rmp->regno, | ||
| 2591 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | ||
| 2592 | cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); | ||
| 2593 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | ||
| 2594 | } | ||
| 2595 | put_cpu(); | ||
| 2596 | } | ||
| 2597 | |||
| 2598 | static void qib_update_sdma_dca(struct qib_pportdata *ppd) | ||
| 2599 | { | ||
| 2600 | struct qib_devdata *dd = ppd->dd; | ||
| 2601 | struct qib_chip_specific *cspec = dd->cspec; | ||
| 2602 | int cpu = get_cpu(); | ||
| 2603 | unsigned pidx = ppd->port - 1; | ||
| 2604 | |||
| 2605 | if (cspec->sdma_cpu[pidx] != cpu) { | ||
| 2606 | cspec->sdma_cpu[pidx] = cpu; | ||
| 2607 | cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? | ||
| 2608 | SYM_MASK(DCACtrlF, SendDma1DCAOPH) : | ||
| 2609 | SYM_MASK(DCACtrlF, SendDma0DCAOPH)); | ||
| 2610 | cspec->dca_rcvhdr_ctrl[4] |= | ||
| 2611 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << | ||
| 2612 | (ppd->hw_pidx ? | ||
| 2613 | SYM_LSB(DCACtrlF, SendDma1DCAOPH) : | ||
| 2614 | SYM_LSB(DCACtrlF, SendDma0DCAOPH)); | ||
| 2615 | qib_write_kreg(dd, KREG_IDX(DCACtrlF), | ||
| 2616 | cspec->dca_rcvhdr_ctrl[4]); | ||
| 2617 | cspec->dca_ctrl |= ppd->hw_pidx ? | ||
| 2618 | SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : | ||
| 2619 | SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); | ||
| 2620 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | ||
| 2621 | } | ||
| 2622 | put_cpu(); | ||
| 2623 | } | ||
| 2624 | |||
| 2625 | static void qib_setup_dca(struct qib_devdata *dd) | ||
| 2626 | { | ||
| 2627 | struct qib_chip_specific *cspec = dd->cspec; | ||
| 2628 | int i; | ||
| 2629 | |||
| 2630 | for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) | ||
| 2631 | cspec->rhdr_cpu[i] = -1; | ||
| 2632 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | ||
| 2633 | cspec->sdma_cpu[i] = -1; | ||
| 2634 | cspec->dca_rcvhdr_ctrl[0] = | ||
| 2635 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | | ||
| 2636 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | | ||
| 2637 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | | ||
| 2638 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); | ||
| 2639 | cspec->dca_rcvhdr_ctrl[1] = | ||
| 2640 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | | ||
| 2641 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | | ||
| 2642 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | | ||
| 2643 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); | ||
| 2644 | cspec->dca_rcvhdr_ctrl[2] = | ||
| 2645 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | | ||
| 2646 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | | ||
| 2647 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | | ||
| 2648 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); | ||
| 2649 | cspec->dca_rcvhdr_ctrl[3] = | ||
| 2650 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | | ||
| 2651 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | | ||
| 2652 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | | ||
| 2653 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); | ||
| 2654 | cspec->dca_rcvhdr_ctrl[4] = | ||
| 2655 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | | ||
| 2656 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); | ||
| 2657 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | ||
| 2658 | qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, | ||
| 2659 | cspec->dca_rcvhdr_ctrl[i]); | ||
| 2660 | } | ||
| 2661 | |||
| 2662 | #endif | ||
| 2663 | |||
| 2664 | /* | 2486 | /* |
| 2665 | * Disable MSIx interrupt if enabled, call generic MSIx code | 2487 | * Disable MSIx interrupt if enabled, call generic MSIx code |
| 2666 | * to cleanup, and clear pending MSIx interrupts. | 2488 | * to cleanup, and clear pending MSIx interrupts. |
| @@ -2701,15 +2523,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd) | |||
| 2701 | { | 2523 | { |
| 2702 | int i; | 2524 | int i; |
| 2703 | 2525 | ||
| 2704 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 2705 | if (dd->flags & QIB_DCA_ENABLED) { | ||
| 2706 | dca_remove_requester(&dd->pcidev->dev); | ||
| 2707 | dd->flags &= ~QIB_DCA_ENABLED; | ||
| 2708 | dd->cspec->dca_ctrl = 0; | ||
| 2709 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); | ||
| 2710 | } | ||
| 2711 | #endif | ||
| 2712 | |||
| 2713 | qib_7322_free_irq(dd); | 2526 | qib_7322_free_irq(dd); |
| 2714 | kfree(dd->cspec->cntrs); | 2527 | kfree(dd->cspec->cntrs); |
| 2715 | kfree(dd->cspec->sendchkenable); | 2528 | kfree(dd->cspec->sendchkenable); |
| @@ -3017,11 +2830,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data) | |||
| 3017 | if (dd->int_counter != (u32) -1) | 2830 | if (dd->int_counter != (u32) -1) |
| 3018 | dd->int_counter++; | 2831 | dd->int_counter++; |
| 3019 | 2832 | ||
| 3020 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 3021 | if (dd->flags & QIB_DCA_ENABLED) | ||
| 3022 | qib_update_rhdrq_dca(rcd); | ||
| 3023 | #endif | ||
| 3024 | |||
| 3025 | /* Clear the interrupt bit we expect to be set. */ | 2833 | /* Clear the interrupt bit we expect to be set. */ |
| 3026 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | | 2834 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | |
| 3027 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); | 2835 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); |
| @@ -3085,11 +2893,6 @@ static irqreturn_t sdma_intr(int irq, void *data) | |||
| 3085 | if (dd->int_counter != (u32) -1) | 2893 | if (dd->int_counter != (u32) -1) |
| 3086 | dd->int_counter++; | 2894 | dd->int_counter++; |
| 3087 | 2895 | ||
| 3088 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 3089 | if (dd->flags & QIB_DCA_ENABLED) | ||
| 3090 | qib_update_sdma_dca(ppd); | ||
| 3091 | #endif | ||
| 3092 | |||
| 3093 | /* Clear the interrupt bit we expect to be set. */ | 2896 | /* Clear the interrupt bit we expect to be set. */ |
| 3094 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 2897 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| 3095 | INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); | 2898 | INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); |
| @@ -3119,11 +2922,6 @@ static irqreturn_t sdma_idle_intr(int irq, void *data) | |||
| 3119 | if (dd->int_counter != (u32) -1) | 2922 | if (dd->int_counter != (u32) -1) |
| 3120 | dd->int_counter++; | 2923 | dd->int_counter++; |
| 3121 | 2924 | ||
| 3122 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 3123 | if (dd->flags & QIB_DCA_ENABLED) | ||
| 3124 | qib_update_sdma_dca(ppd); | ||
| 3125 | #endif | ||
| 3126 | |||
| 3127 | /* Clear the interrupt bit we expect to be set. */ | 2925 | /* Clear the interrupt bit we expect to be set. */ |
| 3128 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 2926 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| 3129 | INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); | 2927 | INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); |
| @@ -3153,11 +2951,6 @@ static irqreturn_t sdma_progress_intr(int irq, void *data) | |||
| 3153 | if (dd->int_counter != (u32) -1) | 2951 | if (dd->int_counter != (u32) -1) |
| 3154 | dd->int_counter++; | 2952 | dd->int_counter++; |
| 3155 | 2953 | ||
| 3156 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 3157 | if (dd->flags & QIB_DCA_ENABLED) | ||
| 3158 | qib_update_sdma_dca(ppd); | ||
| 3159 | #endif | ||
| 3160 | |||
| 3161 | /* Clear the interrupt bit we expect to be set. */ | 2954 | /* Clear the interrupt bit we expect to be set. */ |
| 3162 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 2955 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| 3163 | INT_MASK_P(SDmaProgress, 1) : | 2956 | INT_MASK_P(SDmaProgress, 1) : |
| @@ -3188,11 +2981,6 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data) | |||
| 3188 | if (dd->int_counter != (u32) -1) | 2981 | if (dd->int_counter != (u32) -1) |
| 3189 | dd->int_counter++; | 2982 | dd->int_counter++; |
| 3190 | 2983 | ||
| 3191 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 3192 | if (dd->flags & QIB_DCA_ENABLED) | ||
| 3193 | qib_update_sdma_dca(ppd); | ||
| 3194 | #endif | ||
| 3195 | |||
| 3196 | /* Clear the interrupt bit we expect to be set. */ | 2984 | /* Clear the interrupt bit we expect to be set. */ |
| 3197 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 2985 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| 3198 | INT_MASK_PM(SDmaCleanupDone, 1) : | 2986 | INT_MASK_PM(SDmaCleanupDone, 1) : |
| @@ -4299,10 +4087,6 @@ static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, | |||
| 4299 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, | 4087 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, |
| 4300 | rcd->rcvhdrq_phys); | 4088 | rcd->rcvhdrq_phys); |
| 4301 | rcd->seq_cnt = 1; | 4089 | rcd->seq_cnt = 1; |
| 4302 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 4303 | if (dd->flags & QIB_DCA_ENABLED) | ||
| 4304 | qib_update_rhdrq_dca(rcd); | ||
| 4305 | #endif | ||
| 4306 | } | 4090 | } |
| 4307 | if (op & QIB_RCVCTRL_CTXT_DIS) | 4091 | if (op & QIB_RCVCTRL_CTXT_DIS) |
| 4308 | ppd->p_rcvctrl &= | 4092 | ppd->p_rcvctrl &= |
| @@ -5360,7 +5144,13 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |||
| 5360 | QIBL_IB_AUTONEG_INPROG))) | 5144 | QIBL_IB_AUTONEG_INPROG))) |
| 5361 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | 5145 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); |
| 5362 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | 5146 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { |
| 5147 | /* unlock the Tx settings, speed may change */ | ||
| 5148 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | ||
| 5149 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 5150 | reset_tx_deemphasis_override)); | ||
| 5363 | qib_cancel_sends(ppd); | 5151 | qib_cancel_sends(ppd); |
| 5152 | /* on link down, ensure sane pcs state */ | ||
| 5153 | qib_7322_mini_pcs_reset(ppd); | ||
| 5364 | spin_lock_irqsave(&ppd->sdma_lock, flags); | 5154 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
| 5365 | if (__qib_sdma_running(ppd)) | 5155 | if (__qib_sdma_running(ppd)) |
| 5366 | __qib_sdma_process_event(ppd, | 5156 | __qib_sdma_process_event(ppd, |
| @@ -5766,26 +5556,28 @@ static void qib_init_7322_qsfp(struct qib_pportdata *ppd) | |||
| 5766 | } | 5556 | } |
| 5767 | 5557 | ||
| 5768 | /* | 5558 | /* |
| 5769 | * called at device initialization time, and also if the cable_atten | 5559 | * called at device initialization time, and also if the txselect |
| 5770 | * module parameter is changed. This is used for cables that don't | 5560 | * module parameter is changed. This is used for cables that don't |
| 5771 | * have valid QSFP EEPROMs (not present, or attenuation is zero). | 5561 | * have valid QSFP EEPROMs (not present, or attenuation is zero). |
| 5772 | * We initialize to the default, then if there is a specific | 5562 | * We initialize to the default, then if there is a specific |
| 5773 | * unit,port match, we use that. | 5563 | * unit,port match, we use that (and set it immediately, for the |
| 5564 | * current speed, if the link is at INIT or better). | ||
| 5774 | * String format is "default# unit#,port#=# ... u,p=#", separators must | 5565 | * String format is "default# unit#,port#=# ... u,p=#", separators must |
| 5775 | * be a SPACE character. A newline terminates. | 5566 | * be a SPACE character. A newline terminates. The u,p=# tuples may |
| 5567 | * optionally have "u,p=#,#", where the final # is the H1 value | ||
| 5776 | * The last specific match is used (actually, all are used, but last | 5568 | * The last specific match is used (actually, all are used, but last |
| 5777 | * one is the one that winds up set); if none at all, fall back on default. | 5569 | * one is the one that winds up set); if none at all, fall back on default. |
| 5778 | */ | 5570 | */ |
| 5779 | static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | 5571 | static void set_no_qsfp_atten(struct qib_devdata *dd, int change) |
| 5780 | { | 5572 | { |
| 5781 | char *nxt, *str; | 5573 | char *nxt, *str; |
| 5782 | int pidx, unit, port, deflt; | 5574 | u32 pidx, unit, port, deflt, h1; |
| 5783 | unsigned long val; | 5575 | unsigned long val; |
| 5784 | int any = 0; | 5576 | int any = 0, seth1; |
| 5785 | 5577 | ||
| 5786 | str = cable_atten_list; | 5578 | str = txselect_list; |
| 5787 | 5579 | ||
| 5788 | /* default number is validated in setup_cable_atten() */ | 5580 | /* default number is validated in setup_txselect() */ |
| 5789 | deflt = simple_strtoul(str, &nxt, 0); | 5581 | deflt = simple_strtoul(str, &nxt, 0); |
| 5790 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | 5582 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
| 5791 | dd->pport[pidx].cpspec->no_eep = deflt; | 5583 | dd->pport[pidx].cpspec->no_eep = deflt; |
| @@ -5812,16 +5604,28 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
| 5812 | ; | 5604 | ; |
| 5813 | continue; | 5605 | continue; |
| 5814 | } | 5606 | } |
| 5815 | if (val >= TXDDS_TABLE_SZ) | 5607 | if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) |
| 5816 | continue; | 5608 | continue; |
| 5609 | seth1 = 0; | ||
| 5610 | h1 = 0; /* gcc thinks it might be used uninitted */ | ||
| 5611 | if (*nxt == ',' && nxt[1]) { | ||
| 5612 | str = ++nxt; | ||
| 5613 | h1 = (u32)simple_strtoul(str, &nxt, 0); | ||
| 5614 | if (nxt == str) | ||
| 5615 | while (*nxt && *nxt++ != ' ') /* skip */ | ||
| 5616 | ; | ||
| 5617 | else | ||
| 5618 | seth1 = 1; | ||
| 5619 | } | ||
| 5817 | for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; | 5620 | for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; |
| 5818 | ++pidx) { | 5621 | ++pidx) { |
| 5819 | if (dd->pport[pidx].port != port || | 5622 | struct qib_pportdata *ppd = &dd->pport[pidx]; |
| 5820 | !dd->pport[pidx].link_speed_supported) | 5623 | |
| 5624 | if (ppd->port != port || !ppd->link_speed_supported) | ||
| 5821 | continue; | 5625 | continue; |
| 5822 | dd->pport[pidx].cpspec->no_eep = val; | 5626 | ppd->cpspec->no_eep = val; |
| 5823 | /* now change the IBC and serdes, overriding generic */ | 5627 | /* now change the IBC and serdes, overriding generic */ |
| 5824 | init_txdds_table(&dd->pport[pidx], 1); | 5628 | init_txdds_table(ppd, 1); |
| 5825 | any++; | 5629 | any++; |
| 5826 | } | 5630 | } |
| 5827 | if (*nxt == '\n') | 5631 | if (*nxt == '\n') |
| @@ -5832,35 +5636,35 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
| 5832 | * Change the IBC and serdes, but since it's | 5636 | * Change the IBC and serdes, but since it's |
| 5833 | * general, don't override specific settings. | 5637 | * general, don't override specific settings. |
| 5834 | */ | 5638 | */ |
| 5835 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | 5639 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
| 5836 | if (!dd->pport[pidx].link_speed_supported) | 5640 | if (dd->pport[pidx].link_speed_supported) |
| 5837 | continue; | 5641 | init_txdds_table(&dd->pport[pidx], 0); |
| 5838 | init_txdds_table(&dd->pport[pidx], 0); | ||
| 5839 | } | ||
| 5840 | } | 5642 | } |
| 5841 | } | 5643 | } |
| 5842 | 5644 | ||
| 5843 | /* handle the cable_atten parameter changing */ | 5645 | /* handle the txselect parameter changing */ |
| 5844 | static int setup_cable_atten(const char *str, struct kernel_param *kp) | 5646 | static int setup_txselect(const char *str, struct kernel_param *kp) |
| 5845 | { | 5647 | { |
| 5846 | struct qib_devdata *dd; | 5648 | struct qib_devdata *dd; |
| 5847 | unsigned long val; | 5649 | unsigned long val; |
| 5848 | char *n; | 5650 | char *n; |
| 5849 | if (strlen(str) >= MAX_ATTEN_LEN) { | 5651 | if (strlen(str) >= MAX_ATTEN_LEN) { |
| 5850 | printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string " | 5652 | printk(KERN_INFO QIB_DRV_NAME " txselect_values string " |
| 5851 | "too long\n"); | 5653 | "too long\n"); |
| 5852 | return -ENOSPC; | 5654 | return -ENOSPC; |
| 5853 | } | 5655 | } |
| 5854 | val = simple_strtoul(str, &n, 0); | 5656 | val = simple_strtoul(str, &n, 0); |
| 5855 | if (n == str || val >= TXDDS_TABLE_SZ) { | 5657 | if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { |
| 5856 | printk(KERN_INFO QIB_DRV_NAME | 5658 | printk(KERN_INFO QIB_DRV_NAME |
| 5857 | "cable_atten_values must start with a number\n"); | 5659 | "txselect_values must start with a number < %d\n", |
| 5660 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); | ||
| 5858 | return -EINVAL; | 5661 | return -EINVAL; |
| 5859 | } | 5662 | } |
| 5860 | strcpy(cable_atten_list, str); | 5663 | strcpy(txselect_list, str); |
| 5861 | 5664 | ||
| 5862 | list_for_each_entry(dd, &qib_dev_list, list) | 5665 | list_for_each_entry(dd, &qib_dev_list, list) |
| 5863 | set_no_qsfp_atten(dd, 1); | 5666 | if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) |
| 5667 | set_no_qsfp_atten(dd, 1); | ||
| 5864 | return 0; | 5668 | return 0; |
| 5865 | } | 5669 | } |
| 5866 | 5670 | ||
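Given the format documented for set_no_qsfp_atten() ("default# unit#,port#=# ... u,p=#", optionally u,p=#,H1), a hypothetical write of "10 0,1=18,7" to /sys/module/ib_qib/parameters/txselect (assuming the usual ib_qib module name; the numbers are purely illustrative) would set the default serdes index to 10 and, for unit 0 port 1, select table entry 18 with an H1 value of 7; setup_txselect() above range-checks the leading index against TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ.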
| @@ -6261,28 +6065,17 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
| 6261 | * in adapter-specific routines. | 6065 | * in adapter-specific routines. |
| 6262 | */ | 6066 | */ |
| 6263 | if (!(ppd->dd->flags & QIB_HAS_QSFP)) { | 6067 | if (!(ppd->dd->flags & QIB_HAS_QSFP)) { |
| 6264 | int i; | ||
| 6265 | const struct txdds_ent *txdds; | ||
| 6266 | |||
| 6267 | if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) | 6068 | if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) |
| 6268 | qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " | 6069 | qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " |
| 6269 | "Unknown mezzanine card type\n", | 6070 | "Unknown mezzanine card type\n", |
| 6270 | ppd->dd->unit, ppd->port); | 6071 | dd->unit, ppd->port); |
| 6271 | txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds : | 6072 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; |
| 6272 | &qme_qdr_txdds; | ||
| 6273 | |||
| 6274 | /* | 6073 | /* |
| 6275 | * set values in case link comes up | 6074 | * Choose center value as default tx serdes setting |
| 6276 | * before table is written to driver. | 6075 | * until changed through module parameter. |
| 6277 | */ | 6076 | */ |
| 6278 | cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH : | 6077 | ppd->cpspec->no_eep = IS_QMH(dd) ? |
| 6279 | H1_FORCE_QME; | 6078 | TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4; |
| 6280 | for (i = 0; i < SERDES_CHANS; i++) { | ||
| 6281 | cp->amp[i] = txdds->amp; | ||
| 6282 | cp->pre[i] = txdds->pre; | ||
| 6283 | cp->mainv[i] = txdds->main; | ||
| 6284 | cp->post[i] = txdds->post; | ||
| 6285 | } | ||
| 6286 | } else | 6079 | } else |
| 6287 | cp->h1_val = H1_FORCE_VAL; | 6080 | cp->h1_val = H1_FORCE_VAL; |
| 6288 | 6081 | ||
| @@ -6299,8 +6092,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
| 6299 | 6092 | ||
| 6300 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; | 6093 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; |
| 6301 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; | 6094 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; |
| 6302 | dd->rhf_offset = | 6095 | dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); |
| 6303 | dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); | ||
| 6304 | 6096 | ||
| 6305 | /* we always allocate at least 2048 bytes for eager buffers */ | 6097 | /* we always allocate at least 2048 bytes for eager buffers */ |
| 6306 | dd->rcvegrbufsize = max(mtu, 2048); | 6098 | dd->rcvegrbufsize = max(mtu, 2048); |
| @@ -6919,13 +6711,6 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |||
| 6919 | /* clear diagctrl register, in case diags were running and crashed */ | 6711 | /* clear diagctrl register, in case diags were running and crashed */ |
| 6920 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | 6712 | qib_write_kreg(dd, kr_hwdiagctrl, 0); |
| 6921 | 6713 | ||
| 6922 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
| 6923 | ret = dca_add_requester(&pdev->dev); | ||
| 6924 | if (!ret) { | ||
| 6925 | dd->flags |= QIB_DCA_ENABLED; | ||
| 6926 | qib_setup_dca(dd); | ||
| 6927 | } | ||
| 6928 | #endif | ||
| 6929 | goto bail; | 6714 | goto bail; |
| 6930 | 6715 | ||
| 6931 | bail_cleanup: | 6716 | bail_cleanup: |
| @@ -7111,8 +6896,8 @@ static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { | |||
| 7111 | static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { | 6896 | static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { |
| 7112 | /* amp, pre, main, post */ | 6897 | /* amp, pre, main, post */ |
| 7113 | { 2, 2, 15, 6 }, /* Loopback */ | 6898 | { 2, 2, 15, 6 }, /* Loopback */ |
| 7114 | { 0, 1, 0, 7 }, /* 2 dB */ | 6899 | { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */ |
| 7115 | { 0, 1, 0, 9 }, /* 3 dB */ | 6900 | { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */ |
| 7116 | { 0, 1, 0, 11 }, /* 4 dB */ | 6901 | { 0, 1, 0, 11 }, /* 4 dB */ |
| 7117 | { 0, 1, 0, 13 }, /* 5 dB */ | 6902 | { 0, 1, 0, 13 }, /* 5 dB */ |
| 7118 | { 0, 1, 0, 15 }, /* 6 dB */ | 6903 | { 0, 1, 0, 15 }, /* 6 dB */ |
| @@ -7128,6 +6913,57 @@ static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { | |||
| 7128 | { 0, 2, 9, 15 }, /* 16 dB */ | 6913 | { 0, 2, 9, 15 }, /* 16 dB */ |
| 7129 | }; | 6914 | }; |
| 7130 | 6915 | ||
| 6916 | /* | ||
| 6917 | * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ. | ||
| 6918 | * These are mostly used for mez cards going through connectors | ||
| 6919 | * and backplane traces, but can be used to add other "unusual" | ||
| 6920 | * table values as well. | ||
| 6921 | */ | ||
| 6922 | static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { | ||
| 6923 | /* amp, pre, main, post */ | ||
| 6924 | { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ | ||
| 6925 | { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ | ||
| 6926 | { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ | ||
| 6927 | { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ | ||
| 6928 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6929 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6930 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6931 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6932 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6933 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6934 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | ||
| 6935 | }; | ||
| 6936 | |||
| 6937 | static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { | ||
| 6938 | /* amp, pre, main, post */ | ||
| 6939 | { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ | ||
| 6940 | { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ | ||
| 6941 | { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ | ||
| 6942 | { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ | ||
| 6943 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6944 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6945 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6946 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6947 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6948 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6949 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | ||
| 6950 | }; | ||
| 6951 | |||
| 6952 | static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | ||
| 6953 | /* amp, pre, main, post */ | ||
| 6954 | { 0, 1, 0, 4 }, /* QMH7342 backplane settings */ | ||
| 6955 | { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ | ||
| 6956 | { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ | ||
| 6957 | { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ | ||
| 6958 | { 0, 1, 12, 10 }, /* QME7342 backplane setting */ | ||
| 6959 | { 0, 1, 12, 11 }, /* QME7342 backplane setting */ | ||
| 6960 | { 0, 1, 12, 12 }, /* QME7342 backplane setting */ | ||
| 6961 | { 0, 1, 12, 14 }, /* QME7342 backplane setting */ | ||
| 6962 | { 0, 1, 12, 6 }, /* QME7342 backplane setting */ | ||
| 6963 | { 0, 1, 12, 7 }, /* QME7342 backplane setting */ | ||
| 6964 | { 0, 1, 12, 8 }, /* QME7342 backplane setting */ | ||
| 6965 | }; | ||
| 6966 | |||
| 7131 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, | 6967 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, |
| 7132 | unsigned atten) | 6968 | unsigned atten) |
| 7133 | { | 6969 | { |
| @@ -7145,7 +6981,7 @@ static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, | |||
| 7145 | } | 6981 | } |
| 7146 | 6982 | ||
| 7147 | /* | 6983 | /* |
| 7148 | * if override is set, the module parameter cable_atten has a value | 6984 | * if override is set, the module parameter txselect has a value |
| 7149 | * for this specific port, so use it, rather than our normal mechanism. | 6985 | * for this specific port, so use it, rather than our normal mechanism. |
| 7150 | */ | 6986 | */ |
| 7151 | static void find_best_ent(struct qib_pportdata *ppd, | 6987 | static void find_best_ent(struct qib_pportdata *ppd, |
| @@ -7184,15 +7020,28 @@ static void find_best_ent(struct qib_pportdata *ppd, | |||
| 7184 | *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); | 7020 | *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); |
| 7185 | *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); | 7021 | *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); |
| 7186 | return; | 7022 | return; |
| 7187 | } else { | 7023 | } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) { |
| 7188 | /* | 7024 | /* |
| 7189 | * If we have no (or incomplete) data from the cable | 7025 | * If we have no (or incomplete) data from the cable |
| 7190 | * EEPROM, or no QSFP, use the module parameter value | 7026 | * EEPROM, or no QSFP, or override is set, use the |
| 7191 | * to index into the attenuation table. | 7027 | * module parameter value to index into the attenuation |
| 7028 | * table. | ||
| 7192 | */ | 7029 | */ |
| 7193 | *sdr_dds = &txdds_sdr[ppd->cpspec->no_eep]; | 7030 | idx = ppd->cpspec->no_eep; |
| 7194 | *ddr_dds = &txdds_ddr[ppd->cpspec->no_eep]; | 7031 | *sdr_dds = &txdds_sdr[idx]; |
| 7195 | *qdr_dds = &txdds_qdr[ppd->cpspec->no_eep]; | 7032 | *ddr_dds = &txdds_ddr[idx]; |
| 7033 | *qdr_dds = &txdds_qdr[idx]; | ||
| 7034 | } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { | ||
| 7035 | /* similar to above, but index into the "extra" table. */ | ||
| 7036 | idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ; | ||
| 7037 | *sdr_dds = &txdds_extra_sdr[idx]; | ||
| 7038 | *ddr_dds = &txdds_extra_ddr[idx]; | ||
| 7039 | *qdr_dds = &txdds_extra_qdr[idx]; | ||
| 7040 | } else { | ||
| 7041 | /* this shouldn't happen, it's range checked */ | ||
| 7042 | *sdr_dds = txdds_sdr + qib_long_atten; | ||
| 7043 | *ddr_dds = txdds_ddr + qib_long_atten; | ||
| 7044 | *qdr_dds = txdds_qdr + qib_long_atten; | ||
| 7196 | } | 7045 | } |
| 7197 | } | 7046 | } |
| 7198 | 7047 | ||
| @@ -7203,33 +7052,24 @@ static void init_txdds_table(struct qib_pportdata *ppd, int override) | |||
| 7203 | int idx; | 7052 | int idx; |
| 7204 | int single_ent = 0; | 7053 | int single_ent = 0; |
| 7205 | 7054 | ||
| 7206 | if (IS_QMH(ppd->dd)) { | 7055 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); |
| 7207 | /* normally will be overridden, via setup_qmh() */ | 7056 | |
| 7208 | sdr_dds = &qmh_sdr_txdds; | 7057 | /* for mez cards or override, use the selected value for all entries */ |
| 7209 | ddr_dds = &qmh_ddr_txdds; | 7058 | if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) |
| 7210 | qdr_dds = &qmh_qdr_txdds; | ||
| 7211 | single_ent = 1; | ||
| 7212 | } else if (IS_QME(ppd->dd)) { | ||
| 7213 | sdr_dds = &qme_sdr_txdds; | ||
| 7214 | ddr_dds = &qme_ddr_txdds; | ||
| 7215 | qdr_dds = &qme_qdr_txdds; | ||
| 7216 | single_ent = 1; | 7059 | single_ent = 1; |
| 7217 | } else | ||
| 7218 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); | ||
| 7219 | 7060 | ||
| 7220 | /* Fill in the first entry with the best entry found. */ | 7061 | /* Fill in the first entry with the best entry found. */ |
| 7221 | set_txdds(ppd, 0, sdr_dds); | 7062 | set_txdds(ppd, 0, sdr_dds); |
| 7222 | set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); | 7063 | set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); |
| 7223 | set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); | 7064 | set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); |
| 7224 | 7065 | if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | | |
| 7225 | /* | 7066 | QIBL_LINKACTIVE)) { |
| 7226 | * for our current speed, also write that value into the | 7067 | dds = (struct txdds_ent *)(ppd->link_speed_active == |
| 7227 | * tx serdes registers. | 7068 | QIB_IB_QDR ? qdr_dds : |
| 7228 | */ | 7069 | (ppd->link_speed_active == |
| 7229 | dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? | 7070 | QIB_IB_DDR ? ddr_dds : sdr_dds)); |
| 7230 | qdr_dds : (ppd->link_speed_active == | 7071 | write_tx_serdes_param(ppd, dds); |
| 7231 | QIB_IB_DDR ? ddr_dds : sdr_dds)); | 7072 | } |
| 7232 | write_tx_serdes_param(ppd, dds); | ||
| 7233 | 7073 | ||
| 7234 | /* Fill in the remaining entries with the default table values. */ | 7074 | /* Fill in the remaining entries with the default table values. */ |
| 7235 | for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { | 7075 | for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { |
| @@ -7352,6 +7192,11 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
| 7352 | */ | 7192 | */ |
| 7353 | init_txdds_table(ppd, 0); | 7193 | init_txdds_table(ppd, 0); |
| 7354 | 7194 | ||
| 7195 | /* ensure no tx overrides from earlier driver loads */ | ||
| 7196 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | ||
| 7197 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7198 | reset_tx_deemphasis_override)); | ||
| 7199 | |||
| 7355 | /* Patch some SerDes defaults to "Better for IB" */ | 7200 | /* Patch some SerDes defaults to "Better for IB" */ |
| 7356 | /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ | 7201 | /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ |
| 7357 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | 7202 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); |
| @@ -7421,7 +7266,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
| 7421 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | 7266 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); |
| 7422 | ppd->cpspec->qdr_dfe_on = 1; | 7267 | ppd->cpspec->qdr_dfe_on = 1; |
| 7423 | 7268 | ||
| 7424 | /* (FLoop LOS gate: PPM filter enabled */ | 7269 | /* FLoop LOS gate: PPM filter enabled */ |
| 7425 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); | 7270 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); |
| 7426 | 7271 | ||
| 7427 | /* rx offset center enabled */ | 7272 | /* rx offset center enabled */ |
| @@ -7486,68 +7331,39 @@ static void write_tx_serdes_param(struct qib_pportdata *ppd, | |||
| 7486 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | | 7331 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | |
| 7487 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | | 7332 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | |
| 7488 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); | 7333 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); |
| 7489 | deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 7334 | |
| 7490 | tx_override_deemphasis_select); | 7335 | deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
| 7491 | deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 7336 | tx_override_deemphasis_select); |
| 7492 | txampcntl_d2a); | 7337 | deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
| 7493 | deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 7338 | txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
| 7494 | txc0_ena); | 7339 | txampcntl_d2a); |
| 7495 | deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 7340 | deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
| 7496 | txcp1_ena); | 7341 | txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
| 7497 | deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 7342 | txc0_ena); |
| 7343 | deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7344 | txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7345 | txcp1_ena); | ||
| 7346 | deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7347 | txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7498 | txcn1_ena); | 7348 | txcn1_ena); |
| 7499 | qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); | 7349 | qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); |
| 7500 | } | 7350 | } |
| 7501 | 7351 | ||
| 7502 | /* | 7352 | /* |
| 7503 | * set per-bay, per channel parameters. For now, we ignore | 7353 | * Set the parameters for mez cards on link bounce, so they are |
| 7504 | * do_tx, and always set tx parameters, and set them with the same value | 7354 | * always exactly what was requested. Similar logic to init_txdds |
| 7505 | * for all channels, using the channel 0 value. We may switch to | 7355 | * but does just the serdes. |
| 7506 | * per-channel settings in the future, and that method only needs | ||
| 7507 | * to be done once. | ||
| 7508 | * Because this also writes the IBC txdds table with a single set | ||
| 7509 | * of values, it should be called only for cases where we want to completely | ||
| 7510 | * force a specific setting, typically only for mez cards. | ||
| 7511 | */ | 7356 | */ |
| 7512 | static void adj_tx_serdes(struct qib_pportdata *ppd) | 7357 | static void adj_tx_serdes(struct qib_pportdata *ppd) |
| 7513 | { | 7358 | { |
| 7514 | struct txdds_ent txdds; | 7359 | const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; |
| 7515 | int i; | 7360 | struct txdds_ent *dds; |
| 7516 | u8 *amp, *pre, *mainv, *post; | ||
| 7517 | |||
| 7518 | /* | ||
| 7519 | * Because we use TX_DEEMPHASIS_OVERRIDE, we need to | ||
| 7520 | * always do tx side, just like H1, since it is cleared | ||
| 7521 | * by link down | ||
| 7522 | */ | ||
| 7523 | amp = ppd->cpspec->amp; | ||
| 7524 | pre = ppd->cpspec->pre; | ||
| 7525 | mainv = ppd->cpspec->mainv; | ||
| 7526 | post = ppd->cpspec->post; | ||
| 7527 | |||
| 7528 | amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7529 | txampcntl_d2a); | ||
| 7530 | mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7531 | txc0_ena); | ||
| 7532 | post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7533 | txcp1_ena); | ||
| 7534 | pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7535 | txcn1_ena); | ||
| 7536 | |||
| 7537 | /* | ||
| 7538 | * Use the channel zero values, only, for now, for | ||
| 7539 | * all channels | ||
| 7540 | */ | ||
| 7541 | txdds.amp = amp[0]; | ||
| 7542 | txdds.pre = pre[0]; | ||
| 7543 | txdds.main = mainv[0]; | ||
| 7544 | txdds.post = post[0]; | ||
| 7545 | |||
| 7546 | /* write the QDR table for IBC use, as backup for link down */ | ||
| 7547 | for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i) | ||
| 7548 | set_txdds(ppd, i + 32, &txdds); | ||
| 7549 | 7361 | ||
| 7550 | write_tx_serdes_param(ppd, &txdds); | 7362 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1); |
| 7363 | dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? | ||
| 7364 | qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? | ||
| 7365 | ddr_dds : sdr_dds)); | ||
| 7366 | write_tx_serdes_param(ppd, dds); | ||
| 7551 | } | 7367 | } |
| 7552 | 7368 | ||
| 7553 | /* set QDR forced value for H1, if needed */ | 7369 | /* set QDR forced value for H1, if needed */ |
| @@ -7567,235 +7383,6 @@ static void force_h1(struct qib_pportdata *ppd) | |||
| 7567 | } | 7383 | } |
| 7568 | } | 7384 | } |
| 7569 | 7385 | ||
| 7570 | /* | ||
| 7571 | * Parse the parameters for the QMH7342, to get rx and tx serdes | ||
| 7572 | * settings for that Bay, for both possible mez connectors (PCIe bus) | ||
| 7573 | * and IB link (one link on mez1, two possible on mez2). | ||
| 7574 | * | ||
| 7575 | * Data is comma or white space separated. | ||
| 7576 | * | ||
| 7577 | * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values, | ||
| 7578 | * one per IB lane (serdes channel). | ||
| 7579 | * The groups are Bay, bus# H1 rcv, and amp, pre, post, main Tx values (QDR). | ||
| 7580 | * The Bay # is used only for debugging currently. | ||
| 7581 | * H1 values are set whenever the link goes down, or is at cfg_test or | ||
| 7582 | * cfg_wait_enh. Tx values are programmed once, when this routine is called | ||
| 7583 | * (and with default values at chip initialization). Values are any base, in | ||
| 7584 | * strtoul style, and values are seperated by comma, or any white space | ||
| 7585 | * (space, tab, newline). | ||
| 7586 | * | ||
| 7587 | * An example set might look like this (white space vs | ||
| 7588 | * comma used for human ease of reading) | ||
| 7589 | * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1, | ||
| 7590 | * repeat for mez2 IB1, then mez2 IB2. | ||
| 7591 | * | ||
| 7592 | * B B H1:0 amp:0 pre:0 post: 0 main:0 | ||
| 7593 | * a u H1: 1 amp: 1 pre: 1 post: 1 main: 1 | ||
| 7594 | * y s H1: 2 amp: 2 pre: 2 post: 2 main: 2 | ||
| 7595 | * H1: 4 amp: 3 pre: 3 post: 3 main: 3 | ||
| 7596 | * 1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 | ||
| 7597 | * 1 6 7,6,6,7 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 | ||
| 7598 | * 1 6 9,7,7,8 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 | ||
| 7599 | */ | ||
| 7600 | #define N_QMH_FIELDS 22 | ||
| 7601 | static int setup_qmh_params(const char *str, struct kernel_param *kp) | ||
| 7602 | { | ||
| 7603 | char *abuf, *v, *nv, *nvp; | ||
| 7604 | struct qib_devdata *dd; | ||
| 7605 | struct qib_pportdata *ppd; | ||
| 7606 | u32 mez, vlen, nf, port, bay; | ||
| 7607 | int ret = 0, found = 0; | ||
| 7608 | |||
| 7609 | vlen = strlen(str) + 1; | ||
| 7610 | abuf = kmalloc(vlen, GFP_KERNEL); | ||
| 7611 | if (!abuf) { | ||
| 7612 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7613 | " Unable to allocate QMH param buffer; ignoring\n"); | ||
| 7614 | return 0; | ||
| 7615 | } | ||
| 7616 | memcpy(abuf, str, vlen); | ||
| 7617 | v = abuf; | ||
| 7618 | |||
| 7619 | /* these 3 are because gcc can't know they are set before used */ | ||
| 7620 | port = 1; | ||
| 7621 | mez = 1; /* used only for debugging */ | ||
| 7622 | bay = 0; /* used only for debugging */ | ||
| 7623 | ppd = NULL; | ||
| 7624 | for (nf = 0; (nv = strsep(&v, ", \t\n\r")) && | ||
| 7625 | nf < (N_QMH_FIELDS * 3);) { | ||
| 7626 | u32 val; | ||
| 7627 | |||
| 7628 | if (!*nv) | ||
| 7629 | /* allow for multiple separators */ | ||
| 7630 | continue; | ||
| 7631 | |||
| 7632 | val = simple_strtoul(nv, &nvp, 0); | ||
| 7633 | if (nv == nvp) { | ||
| 7634 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7635 | " Bay%u, mez%u IB%u non-numeric value (%s) " | ||
| 7636 | "field #%u, ignoring rest\n", bay, mez, | ||
| 7637 | port, nv, nf % (N_QMH_FIELDS * 3)); | ||
| 7638 | ret = -EINVAL; | ||
| 7639 | goto bail; | ||
| 7640 | } | ||
| 7641 | if (!(nf % N_QMH_FIELDS)) { | ||
| 7642 | ppd = NULL; | ||
| 7643 | bay = val; | ||
| 7644 | if (!bay || bay > 16) { | ||
| 7645 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7646 | " Invalid bay # %u, field %u, " | ||
| 7647 | "ignoring rest\n", bay, nf); | ||
| 7648 | ret = -EINVAL; | ||
| 7649 | goto bail; | ||
| 7650 | } | ||
| 7651 | } else if ((nf % N_QMH_FIELDS) == 1) { | ||
| 7652 | u32 bus = val; | ||
| 7653 | if (nf == 1) { | ||
| 7654 | mez = 1; | ||
| 7655 | port = 1; | ||
| 7656 | } else if (nf == (N_QMH_FIELDS + 1)) { | ||
| 7657 | mez = 2; | ||
| 7658 | port = 1; | ||
| 7659 | } else { | ||
| 7660 | mez = 2; | ||
| 7661 | port = 2; | ||
| 7662 | } | ||
| 7663 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
| 7664 | if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322 | ||
| 7665 | || !IS_QMH(dd)) | ||
| 7666 | continue; /* only for QMH cards */ | ||
| 7667 | if (dd->pcidev->bus->number == bus) { | ||
| 7668 | found++; | ||
| 7669 | ppd = &dd->pport[port - 1]; | ||
| 7670 | } | ||
| 7671 | } | ||
| 7672 | } else if (ppd) { | ||
| 7673 | u32 parm = (nf % N_QMH_FIELDS) - 2; | ||
| 7674 | if (parm < SERDES_CHANS && !(parm % SERDES_CHANS)) | ||
| 7675 | ppd->cpspec->h1_val = val; | ||
| 7676 | else if (parm < (2 * SERDES_CHANS)) | ||
| 7677 | ppd->cpspec->amp[parm % SERDES_CHANS] = val; | ||
| 7678 | else if (parm < (3 * SERDES_CHANS)) | ||
| 7679 | ppd->cpspec->pre[parm % SERDES_CHANS] = val; | ||
| 7680 | else if (parm < (4 * SERDES_CHANS)) | ||
| 7681 | ppd->cpspec->post[parm % SERDES_CHANS] = val; | ||
| 7682 | else { | ||
| 7683 | ppd->cpspec->mainv[parm % SERDES_CHANS] = val; | ||
| 7684 | /* At the end of a port, set params */ | ||
| 7685 | if (parm == ((5 * SERDES_CHANS) - 1)) | ||
| 7686 | adj_tx_serdes(ppd); | ||
| 7687 | } | ||
| 7688 | } | ||
| 7689 | nf++; | ||
| 7690 | } | ||
| 7691 | if (!found) { | ||
| 7692 | printk(KERN_ERR QIB_DRV_NAME | ||
| 7693 | ": No match found for qmh_serdes_setup parameter\n"); | ||
| 7694 | ret = -EINVAL; | ||
| 7695 | } | ||
| 7696 | bail: | ||
| 7697 | kfree(abuf); | ||
| 7698 | return ret; | ||
| 7699 | } | ||
| 7700 | |||
| 7701 | /* | ||
| 7702 | * Similarly for QME7342, but the format is simpler, values are the | ||
| 7703 | * same for all mez card positions in a blade (2 or 4 per blade), but | ||
| 7704 | * are different for some blades vs others, and we don't need to | ||
| 7705 | * specify different parameters for different serdes channels or different | ||
| 7706 | * IB ports. | ||
| 7707 | * Format is: h1 amp,pre,post,main | ||
| 7708 | * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main | ||
| 7709 | */ | ||
| 7710 | #define N_QME_FIELDS 5 | ||
| 7711 | static int setup_qme_params(const char *str, struct kernel_param *kp) | ||
| 7712 | { | ||
| 7713 | char *abuf, *v, *nv, *nvp; | ||
| 7714 | struct qib_devdata *dd; | ||
| 7715 | u32 vlen, nf, port = 0; | ||
| 7716 | u8 h1, tx[4]; /* amp, pre, post, main */ | ||
| 7717 | int ret = -EINVAL; | ||
| 7718 | char *seplist; | ||
| 7719 | |||
| 7720 | vlen = strlen(str) + 1; | ||
| 7721 | abuf = kmalloc(vlen, GFP_KERNEL); | ||
| 7722 | if (!abuf) { | ||
| 7723 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7724 | " Unable to allocate QME param buffer; ignoring\n"); | ||
| 7725 | return 0; | ||
| 7726 | } | ||
| 7727 | strncpy(abuf, str, vlen); | ||
| 7728 | |||
| 7729 | v = abuf; | ||
| 7730 | seplist = " \t"; | ||
| 7731 | h1 = H1_FORCE_QME; /* gcc can't figure out always set before used */ | ||
| 7732 | |||
| 7733 | for (nf = 0; (nv = strsep(&v, seplist)); ) { | ||
| 7734 | u32 val; | ||
| 7735 | |||
| 7736 | if (!*nv) | ||
| 7737 | /* allow for multiple separators */ | ||
| 7738 | continue; | ||
| 7739 | |||
| 7740 | if (!nf && *nv == 'P') { | ||
| 7741 | /* alternate format with port */ | ||
| 7742 | val = simple_strtoul(++nv, &nvp, 0); | ||
| 7743 | if (nv == nvp || port >= NUM_IB_PORTS) { | ||
| 7744 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7745 | " %s: non-numeric port value (%s) " | ||
| 7746 | "ignoring rest\n", __func__, nv); | ||
| 7747 | goto done; | ||
| 7748 | } | ||
| 7749 | port = val; | ||
| 7750 | continue; /* without incrementing nf */ | ||
| 7751 | } | ||
| 7752 | val = simple_strtoul(nv, &nvp, 0); | ||
| 7753 | if (nv == nvp) { | ||
| 7754 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7755 | " %s: non-numeric value (%s) " | ||
| 7756 | "field #%u, ignoring rest\n", __func__, | ||
| 7757 | nv, nf); | ||
| 7758 | goto done; | ||
| 7759 | } | ||
| 7760 | if (!nf) { | ||
| 7761 | h1 = val; | ||
| 7762 | seplist = ","; | ||
| 7763 | } else | ||
| 7764 | tx[nf - 1] = val; | ||
| 7765 | if (++nf == N_QME_FIELDS) { | ||
| 7766 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
| 7767 | int pidx, i; | ||
| 7768 | if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322 | ||
| 7769 | || !IS_QME(dd)) | ||
| 7770 | continue; /* only for QME cards */ | ||
| 7771 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
| 7772 | struct qib_pportdata *ppd; | ||
| 7773 | ppd = &dd->pport[pidx]; | ||
| 7774 | if ((port && ppd->port != port) || | ||
| 7775 | !ppd->link_speed_supported) | ||
| 7776 | continue; | ||
| 7777 | ppd->cpspec->h1_val = h1; | ||
| 7778 | for (i = 0; i < SERDES_CHANS; i++) { | ||
| 7779 | ppd->cpspec->amp[i] = tx[0]; | ||
| 7780 | ppd->cpspec->pre[i] = tx[1]; | ||
| 7781 | ppd->cpspec->post[i] = tx[2]; | ||
| 7782 | ppd->cpspec->mainv[i] = tx[3]; | ||
| 7783 | } | ||
| 7784 | adj_tx_serdes(ppd); | ||
| 7785 | } | ||
| 7786 | } | ||
| 7787 | ret = 0; | ||
| 7788 | goto done; | ||
| 7789 | } | ||
| 7790 | } | ||
| 7791 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7792 | " %s: Only %u of %u fields provided, skipping\n", | ||
| 7793 | __func__, nf, N_QME_FIELDS); | ||
| 7794 | done: | ||
| 7795 | kfree(abuf); | ||
| 7796 | return ret; | ||
| 7797 | } | ||
| 7798 | |||
| 7799 | #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) | 7386 | #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) |
| 7800 | #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) | 7387 | #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) |
| 7801 | 7388 | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index c0139c07e97e..9b40f345ac3f 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
| @@ -1237,7 +1237,13 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
| 1237 | */ | 1237 | */ |
| 1238 | switch (ent->device) { | 1238 | switch (ent->device) { |
| 1239 | case PCI_DEVICE_ID_QLOGIC_IB_6120: | 1239 | case PCI_DEVICE_ID_QLOGIC_IB_6120: |
| 1240 | #ifdef CONFIG_PCI_MSI | ||
| 1240 | dd = qib_init_iba6120_funcs(pdev, ent); | 1241 | dd = qib_init_iba6120_funcs(pdev, ent); |
| 1242 | #else | ||
| 1243 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " | ||
| 1244 | "work if CONFIG_PCI_MSI is not enabled\n", | ||
| 1245 | ent->device); | ||
| 1246 | #endif | ||
| 1241 | break; | 1247 | break; |
| 1242 | 1248 | ||
| 1243 | case PCI_DEVICE_ID_QLOGIC_IB_7220: | 1249 | case PCI_DEVICE_ID_QLOGIC_IB_7220: |
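For illustration only (not part of the patch): the qib_init.c hunk compiles the IBA6120 init call out entirely when CONFIG_PCI_MSI is disabled and reports an early error instead, since that chip needs MSI. A generic sketch of the same compile-time guard, with placeholder names rather than the qib functions:

/* Sketch of a compile-time capability guard; HAVE_MSI stands in for
 * CONFIG_PCI_MSI and the functions are placeholders, not kernel API. */
#include <stdio.h>

#ifdef HAVE_MSI
static void init_device(int id) { printf("init 0x%x with MSI\n", id); }
#endif

static void probe(int id)
{
#ifdef HAVE_MSI
        init_device(id);
#else
        fprintf(stderr, "device 0x%x cannot work without MSI support\n", id);
#endif
}

int main(void) { probe(0x6120); return 0; }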
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index f98d17a7108b..81bf25e67ce1 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
| @@ -21,7 +21,7 @@ comment "LED drivers" | |||
| 21 | 21 | ||
| 22 | config LEDS_88PM860X | 22 | config LEDS_88PM860X |
| 23 | tristate "LED Support for Marvell 88PM860x PMIC" | 23 | tristate "LED Support for Marvell 88PM860x PMIC" |
| 24 | depends on LEDS_CLASS && MFD_88PM860X | 24 | depends on MFD_88PM860X |
| 25 | help | 25 | help |
| 26 | This option enables support for on-chip LED drivers found on Marvell | 26 | This option enables support for on-chip LED drivers found on Marvell |
| 27 | Semiconductor 88PM8606 PMIC. | 27 | Semiconductor 88PM8606 PMIC. |
| @@ -69,8 +69,8 @@ config LEDS_NET48XX | |||
| 69 | 69 | ||
| 70 | config LEDS_NET5501 | 70 | config LEDS_NET5501 |
| 71 | tristate "LED Support for Soekris net5501 series Error LED" | 71 | tristate "LED Support for Soekris net5501 series Error LED" |
| 72 | depends on LEDS_CLASS && LEDS_TRIGGERS | 72 | depends on LEDS_TRIGGERS |
| 73 | depends on LEDS_GPIO_PLATFORM && GPIO_CS5535 | 73 | depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535 |
| 74 | select LEDS_TRIGGER_DEFAULT_ON | 74 | select LEDS_TRIGGER_DEFAULT_ON |
| 75 | default n | 75 | default n |
| 76 | help | 76 | help |
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 26843dd6b859..cc22eeefa10b 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c | |||
| @@ -250,7 +250,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev, | |||
| 250 | led.gpio = of_get_gpio_flags(child, 0, &flags); | 250 | led.gpio = of_get_gpio_flags(child, 0, &flags); |
| 251 | led.active_low = flags & OF_GPIO_ACTIVE_LOW; | 251 | led.active_low = flags & OF_GPIO_ACTIVE_LOW; |
| 252 | led.name = of_get_property(child, "label", NULL) ? : child->name; | 252 | led.name = of_get_property(child, "label", NULL) ? : child->name; |
| 253 | led.blinking = 0; | ||
| 254 | led.default_trigger = | 253 | led.default_trigger = |
| 255 | of_get_property(child, "linux,default-trigger", NULL); | 254 | of_get_property(child, "linux,default-trigger", NULL); |
| 256 | state = of_get_property(child, "default-state", NULL); | 255 | state = of_get_property(child, "default-state", NULL); |
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 72ebb3f06b86..4dfa6b90c21c 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c | |||
| @@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) | |||
| 189 | return new_offset; | 189 | return new_offset; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static int vol_cdev_fsync(struct file *file, struct dentry *dentry, | 192 | static int vol_cdev_fsync(struct file *file, int datasync) |
| 193 | int datasync) | ||
| 194 | { | 193 | { |
| 195 | struct ubi_volume_desc *desc = file->private_data; | 194 | struct ubi_volume_desc *desc = file->private_data; |
| 196 | struct ubi_device *ubi = desc->vol->ubi; | 195 | struct ubi_device *ubi = desc->vol->ubi; |
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 82eaf65d2d85..ea9b7a098c9b 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c | |||
| @@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id) | |||
| 551 | void __iomem *shmem; | 551 | void __iomem *shmem; |
| 552 | 552 | ||
| 553 | if (dev == NULL) { | 553 | if (dev == NULL) { |
| 554 | pr_err("%s: net_interrupt(): irq %d for unknown device.\n", | 554 | pr_err("net_interrupt(): irq %d for unknown device.\n", irq); |
| 555 | dev->name, irq); | ||
| 556 | return IRQ_NONE; | 555 | return IRQ_NONE; |
| 557 | } | 556 | } |
| 558 | 557 | ||
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index c911bfb55b19..9d11dbf5e4da 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
| @@ -294,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter) | |||
| 294 | } else { | 294 | } else { |
| 295 | return 0; | 295 | return 0; |
| 296 | } | 296 | } |
| 297 | } while (timeout < 20); | 297 | } while (timeout < 40); |
| 298 | 298 | ||
| 299 | dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); | 299 | dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); |
| 300 | return -1; | 300 | return -1; |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index aa065c71ddd8..54b14272f333 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
| @@ -1861,7 +1861,7 @@ static int be_setup(struct be_adapter *adapter) | |||
| 1861 | goto if_destroy; | 1861 | goto if_destroy; |
| 1862 | } | 1862 | } |
| 1863 | vf++; | 1863 | vf++; |
| 1864 | } while (vf < num_vfs); | 1864 | } |
| 1865 | } else if (!be_physfn(adapter)) { | 1865 | } else if (!be_physfn(adapter)) { |
| 1866 | status = be_cmd_mac_addr_query(adapter, mac, | 1866 | status = be_cmd_mac_addr_query(adapter, mac, |
| 1867 | MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); | 1867 | MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index be90d3598bca..fe925663d39a 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
| @@ -3367,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev) | |||
| 3367 | 3367 | ||
| 3368 | static void cnic_init_context(struct cnic_dev *dev, u32 cid) | 3368 | static void cnic_init_context(struct cnic_dev *dev, u32 cid) |
| 3369 | { | 3369 | { |
| 3370 | struct cnic_local *cp = dev->cnic_priv; | ||
| 3371 | u32 cid_addr; | 3370 | u32 cid_addr; |
| 3372 | int i; | 3371 | int i; |
| 3373 | 3372 | ||
| 3374 | if (CHIP_NUM(cp) == CHIP_NUM_5709) | ||
| 3375 | return; | ||
| 3376 | |||
| 3377 | cid_addr = GET_CID_ADDR(cid); | 3373 | cid_addr = GET_CID_ADDR(cid); |
| 3378 | 3374 | ||
| 3379 | for (i = 0; i < CTX_SIZE; i += 4) | 3375 | for (i = 0; i < CTX_SIZE; i += 4) |
| @@ -3530,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) | |||
| 3530 | 3526 | ||
| 3531 | sb_id = cp->status_blk_num; | 3527 | sb_id = cp->status_blk_num; |
| 3532 | tx_cid = 20; | 3528 | tx_cid = 20; |
| 3533 | cnic_init_context(dev, tx_cid); | ||
| 3534 | cnic_init_context(dev, tx_cid + 1); | ||
| 3535 | cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; | 3529 | cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; |
| 3536 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { | 3530 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { |
| 3537 | struct status_block_msix *sblk = cp->status_blk.bnx2; | 3531 | struct status_block_msix *sblk = cp->status_blk.bnx2; |
| 3538 | 3532 | ||
| 3539 | tx_cid = TX_TSS_CID + sb_id - 1; | 3533 | tx_cid = TX_TSS_CID + sb_id - 1; |
| 3540 | cnic_init_context(dev, tx_cid); | ||
| 3541 | CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | | 3534 | CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | |
| 3542 | (TX_TSS_CID << 7)); | 3535 | (TX_TSS_CID << 7)); |
| 3543 | cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; | 3536 | cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; |
| @@ -3556,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) | |||
| 3556 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; | 3549 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; |
| 3557 | offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; | 3550 | offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; |
| 3558 | } else { | 3551 | } else { |
| 3552 | cnic_init_context(dev, tx_cid); | ||
| 3553 | cnic_init_context(dev, tx_cid + 1); | ||
| 3554 | |||
| 3559 | offset0 = BNX2_L2CTX_TYPE; | 3555 | offset0 = BNX2_L2CTX_TYPE; |
| 3560 | offset1 = BNX2_L2CTX_CMD_TYPE; | 3556 | offset1 = BNX2_L2CTX_CMD_TYPE; |
| 3561 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; | 3557 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; |
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 110c62072e6f..0c55177db046 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | #ifndef CNIC_IF_H | 12 | #ifndef CNIC_IF_H |
| 13 | #define CNIC_IF_H | 13 | #define CNIC_IF_H |
| 14 | 14 | ||
| 15 | #define CNIC_MODULE_VERSION "2.1.1" | 15 | #define CNIC_MODULE_VERSION "2.1.2" |
| 16 | #define CNIC_MODULE_RELDATE "Feb 22, 2010" | 16 | #define CNIC_MODULE_RELDATE "May 26, 2010" |
| 17 | 17 | ||
| 18 | #define CNIC_ULP_RDMA 0 | 18 | #define CNIC_ULP_RDMA 0 |
| 19 | #define CNIC_ULP_ISCSI 1 | 19 | #define CNIC_ULP_ISCSI 1 |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 326465ffbb23..ddf7a86cd466 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
| @@ -681,6 +681,8 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
| 681 | struct phy_device *phy_dev = NULL; | 681 | struct phy_device *phy_dev = NULL; |
| 682 | int phy_addr; | 682 | int phy_addr; |
| 683 | 683 | ||
| 684 | fep->phy_dev = NULL; | ||
| 685 | |||
| 684 | /* find the first phy */ | 686 | /* find the first phy */ |
| 685 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | 687 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { |
| 686 | if (fep->mii_bus->phy_map[phy_addr]) { | 688 | if (fep->mii_bus->phy_map[phy_addr]) { |
| @@ -711,6 +713,11 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
| 711 | fep->link = 0; | 713 | fep->link = 0; |
| 712 | fep->full_duplex = 0; | 714 | fep->full_duplex = 0; |
| 713 | 715 | ||
| 716 | printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " | ||
| 717 | "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, | ||
| 718 | fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), | ||
| 719 | fep->phy_dev->irq); | ||
| 720 | |||
| 714 | return 0; | 721 | return 0; |
| 715 | } | 722 | } |
| 716 | 723 | ||
| @@ -756,13 +763,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
| 756 | if (mdiobus_register(fep->mii_bus)) | 763 | if (mdiobus_register(fep->mii_bus)) |
| 757 | goto err_out_free_mdio_irq; | 764 | goto err_out_free_mdio_irq; |
| 758 | 765 | ||
| 759 | if (fec_enet_mii_probe(dev) != 0) | ||
| 760 | goto err_out_unregister_bus; | ||
| 761 | |||
| 762 | return 0; | 766 | return 0; |
| 763 | 767 | ||
| 764 | err_out_unregister_bus: | ||
| 765 | mdiobus_unregister(fep->mii_bus); | ||
| 766 | err_out_free_mdio_irq: | 768 | err_out_free_mdio_irq: |
| 767 | kfree(fep->mii_bus->irq); | 769 | kfree(fep->mii_bus->irq); |
| 768 | err_out_free_mdiobus: | 770 | err_out_free_mdiobus: |
| @@ -915,7 +917,12 @@ fec_enet_open(struct net_device *dev) | |||
| 915 | if (ret) | 917 | if (ret) |
| 916 | return ret; | 918 | return ret; |
| 917 | 919 | ||
| 918 | /* schedule a link state check */ | 920 | /* Probe and connect to PHY when opening the interface */ |
| 921 | ret = fec_enet_mii_probe(dev); | ||
| 922 | if (ret) { | ||
| 923 | fec_enet_free_buffers(dev); | ||
| 924 | return ret; | ||
| 925 | } | ||
| 919 | phy_start(fep->phy_dev); | 926 | phy_start(fep->phy_dev); |
| 920 | netif_start_queue(dev); | 927 | netif_start_queue(dev); |
| 921 | fep->opened = 1; | 928 | fep->opened = 1; |
| @@ -929,10 +936,12 @@ fec_enet_close(struct net_device *dev) | |||
| 929 | 936 | ||
| 930 | /* Don't know what to do yet. */ | 937 | /* Don't know what to do yet. */ |
| 931 | fep->opened = 0; | 938 | fep->opened = 0; |
| 932 | phy_stop(fep->phy_dev); | ||
| 933 | netif_stop_queue(dev); | 939 | netif_stop_queue(dev); |
| 934 | fec_stop(dev); | 940 | fec_stop(dev); |
| 935 | 941 | ||
| 942 | if (fep->phy_dev) | ||
| 943 | phy_disconnect(fep->phy_dev); | ||
| 944 | |||
| 936 | fec_enet_free_buffers(dev); | 945 | fec_enet_free_buffers(dev); |
| 937 | 946 | ||
| 938 | return 0; | 947 | return 0; |
| @@ -1316,11 +1325,6 @@ fec_probe(struct platform_device *pdev) | |||
| 1316 | if (ret) | 1325 | if (ret) |
| 1317 | goto failed_register; | 1326 | goto failed_register; |
| 1318 | 1327 | ||
| 1319 | printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " | ||
| 1320 | "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, | ||
| 1321 | fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), | ||
| 1322 | fep->phy_dev->irq); | ||
| 1323 | |||
| 1324 | return 0; | 1328 | return 0; |
| 1325 | 1329 | ||
| 1326 | failed_register: | 1330 | failed_register: |
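For illustration only (not part of the patch): the fec.c hunks move PHY probing into the open path and disconnect the PHY on close, clearing and checking fep->phy_dev so a failed or never-run probe is not dereferenced. A reduced sketch of that open/close pairing using placeholder helpers, not the kernel phylib API:

/* Sketch of the open/close pairing above; my_phy_probe() and the
 * structs are placeholders, not kernel API. */
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>

struct phy_handle { int addr; };
struct my_priv { struct phy_handle *phy; };   /* NULL until open succeeds */

static struct phy_handle *my_phy_probe(void)
{
        return calloc(1, sizeof(struct phy_handle));
}

static int my_open(struct my_priv *p)
{
        p->phy = my_phy_probe();   /* connect to the PHY only on open */
        if (!p->phy)
                return -ENODEV;    /* caller frees buffers and bails */
        return 0;
}

static void my_close(struct my_priv *p)
{
        if (p->phy)                /* guard: open may never have run */
                free(p->phy);
        p->phy = NULL;
}

int main(void)
{
        struct my_priv p = { NULL };

        if (!my_open(&p))
                my_close(&p);
        return 0;
}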
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 694132e04af6..4e7d1d0a2340 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
| @@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void) | |||
| 1151 | dev = alloc_netdev(sizeof(struct yam_port), name, | 1151 | dev = alloc_netdev(sizeof(struct yam_port), name, |
| 1152 | yam_setup); | 1152 | yam_setup); |
| 1153 | if (!dev) { | 1153 | if (!dev) { |
| 1154 | printk(KERN_ERR "yam: cannot allocate net device %s\n", | 1154 | pr_err("yam: cannot allocate net device\n"); |
| 1155 | dev->name); | ||
| 1156 | err = -ENOMEM; | 1155 | err = -ENOMEM; |
| 1157 | goto error; | 1156 | goto error; |
| 1158 | } | 1157 | } |
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h index c03358434acb..522abe2ff25a 100644 --- a/drivers/net/ll_temac.h +++ b/drivers/net/ll_temac.h | |||
| @@ -295,6 +295,10 @@ This option defaults to enabled (set) */ | |||
| 295 | 295 | ||
| 296 | #define MULTICAST_CAM_TABLE_NUM 4 | 296 | #define MULTICAST_CAM_TABLE_NUM 4 |
| 297 | 297 | ||
| 298 | /* TEMAC Synthesis features */ | ||
| 299 | #define TEMAC_FEATURE_RX_CSUM (1 << 0) | ||
| 300 | #define TEMAC_FEATURE_TX_CSUM (1 << 1) | ||
| 301 | |||
| 298 | /* TX/RX CURDESC_PTR points to first descriptor */ | 302 | /* TX/RX CURDESC_PTR points to first descriptor */ |
| 299 | /* TX/RX TAILDESC_PTR points to last descriptor in linked list */ | 303 | /* TX/RX TAILDESC_PTR points to last descriptor in linked list */ |
| 300 | 304 | ||
| @@ -353,6 +357,7 @@ struct temac_local { | |||
| 353 | struct mutex indirect_mutex; | 357 | struct mutex indirect_mutex; |
| 354 | u32 options; /* Current options word */ | 358 | u32 options; /* Current options word */ |
| 355 | int last_link; | 359 | int last_link; |
| 360 | unsigned int temac_features; | ||
| 356 | 361 | ||
| 357 | /* Buffer descriptors */ | 362 | /* Buffer descriptors */ |
| 358 | struct cdmac_bd *tx_bd_v; | 363 | struct cdmac_bd *tx_bd_v; |
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index fa7620e28404..52dcc8495647 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c | |||
| @@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev) | |||
| 245 | CHNL_CTRL_IRQ_COAL_EN); | 245 | CHNL_CTRL_IRQ_COAL_EN); |
| 246 | /* 0x10220483 */ | 246 | /* 0x10220483 */ |
| 247 | /* 0x00100483 */ | 247 | /* 0x00100483 */ |
| 248 | lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | | 248 | lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 | |
| 249 | CHNL_CTRL_IRQ_EN | | 249 | CHNL_CTRL_IRQ_EN | |
| 250 | CHNL_CTRL_IRQ_DLY_EN | | 250 | CHNL_CTRL_IRQ_DLY_EN | |
| 251 | CHNL_CTRL_IRQ_COAL_EN | | 251 | CHNL_CTRL_IRQ_COAL_EN | |
| @@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev) | |||
| 574 | if (cur_p->app4) | 574 | if (cur_p->app4) |
| 575 | dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); | 575 | dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); |
| 576 | cur_p->app0 = 0; | 576 | cur_p->app0 = 0; |
| 577 | cur_p->app1 = 0; | ||
| 578 | cur_p->app2 = 0; | ||
| 579 | cur_p->app3 = 0; | ||
| 580 | cur_p->app4 = 0; | ||
| 577 | 581 | ||
| 578 | ndev->stats.tx_packets++; | 582 | ndev->stats.tx_packets++; |
| 579 | ndev->stats.tx_bytes += cur_p->len; | 583 | ndev->stats.tx_bytes += cur_p->len; |
| @@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev) | |||
| 589 | netif_wake_queue(ndev); | 593 | netif_wake_queue(ndev); |
| 590 | } | 594 | } |
| 591 | 595 | ||
| 596 | static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) | ||
| 597 | { | ||
| 598 | struct cdmac_bd *cur_p; | ||
| 599 | int tail; | ||
| 600 | |||
| 601 | tail = lp->tx_bd_tail; | ||
| 602 | cur_p = &lp->tx_bd_v[tail]; | ||
| 603 | |||
| 604 | do { | ||
| 605 | if (cur_p->app0) | ||
| 606 | return NETDEV_TX_BUSY; | ||
| 607 | |||
| 608 | tail++; | ||
| 609 | if (tail >= TX_BD_NUM) | ||
| 610 | tail = 0; | ||
| 611 | |||
| 612 | cur_p = &lp->tx_bd_v[tail]; | ||
| 613 | num_frag--; | ||
| 614 | } while (num_frag >= 0); | ||
| 615 | |||
| 616 | return 0; | ||
| 617 | } | ||
| 618 | |||
| 592 | static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | 619 | static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
| 593 | { | 620 | { |
| 594 | struct temac_local *lp = netdev_priv(ndev); | 621 | struct temac_local *lp = netdev_priv(ndev); |
| @@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 603 | start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; | 630 | start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; |
| 604 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; | 631 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
| 605 | 632 | ||
| 606 | if (cur_p->app0 & STS_CTRL_APP0_CMPLT) { | 633 | if (temac_check_tx_bd_space(lp, num_frag)) { |
| 607 | if (!netif_queue_stopped(ndev)) { | 634 | if (!netif_queue_stopped(ndev)) { |
| 608 | netif_stop_queue(ndev); | 635 | netif_stop_queue(ndev); |
| 609 | return NETDEV_TX_BUSY; | 636 | return NETDEV_TX_BUSY; |
| @@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 613 | 640 | ||
| 614 | cur_p->app0 = 0; | 641 | cur_p->app0 = 0; |
| 615 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 642 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 616 | const struct iphdr *ip = ip_hdr(skb); | 643 | unsigned int csum_start_off = skb_transport_offset(skb); |
| 617 | int length = 0, start = 0, insert = 0; | 644 | unsigned int csum_index_off = csum_start_off + skb->csum_offset; |
| 618 | 645 | ||
| 619 | switch (ip->protocol) { | 646 | cur_p->app0 |= 1; /* TX Checksum Enabled */ |
| 620 | case IPPROTO_TCP: | 647 | cur_p->app1 = (csum_start_off << 16) | csum_index_off; |
| 621 | start = sizeof(struct iphdr) + ETH_HLEN; | 648 | cur_p->app2 = 0; /* initial checksum seed */ |
| 622 | insert = sizeof(struct iphdr) + ETH_HLEN + 16; | ||
| 623 | length = ip->tot_len - sizeof(struct iphdr); | ||
| 624 | break; | ||
| 625 | case IPPROTO_UDP: | ||
| 626 | start = sizeof(struct iphdr) + ETH_HLEN; | ||
| 627 | insert = sizeof(struct iphdr) + ETH_HLEN + 6; | ||
| 628 | length = ip->tot_len - sizeof(struct iphdr); | ||
| 629 | break; | ||
| 630 | default: | ||
| 631 | break; | ||
| 632 | } | ||
| 633 | cur_p->app1 = ((start << 16) | insert); | ||
| 634 | cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr, | ||
| 635 | length, ip->protocol, 0); | ||
| 636 | skb->data[insert] = 0; | ||
| 637 | skb->data[insert + 1] = 0; | ||
| 638 | } | 649 | } |
| 650 | |||
| 639 | cur_p->app0 |= STS_CTRL_APP0_SOP; | 651 | cur_p->app0 |= STS_CTRL_APP0_SOP; |
| 640 | cur_p->len = skb_headlen(skb); | 652 | cur_p->len = skb_headlen(skb); |
| 641 | cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, | 653 | cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, |
| @@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev) | |||
| 699 | skb->protocol = eth_type_trans(skb, ndev); | 711 | skb->protocol = eth_type_trans(skb, ndev); |
| 700 | skb->ip_summed = CHECKSUM_NONE; | 712 | skb->ip_summed = CHECKSUM_NONE; |
| 701 | 713 | ||
| 714 | /* if we're doing rx csum offload, set it up */ | ||
| 715 | if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && | ||
| 716 | (skb->protocol == __constant_htons(ETH_P_IP)) && | ||
| 717 | (skb->len > 64)) { | ||
| 718 | |||
| 719 | skb->csum = cur_p->app3 & 0xFFFF; | ||
| 720 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
| 721 | } | ||
| 722 | |||
| 702 | netif_rx(skb); | 723 | netif_rx(skb); |
| 703 | 724 | ||
| 704 | ndev->stats.rx_packets++; | 725 | ndev->stats.rx_packets++; |
| @@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
| 883 | struct temac_local *lp; | 904 | struct temac_local *lp; |
| 884 | struct net_device *ndev; | 905 | struct net_device *ndev; |
| 885 | const void *addr; | 906 | const void *addr; |
| 907 | __be32 *p; | ||
| 886 | int size, rc = 0; | 908 | int size, rc = 0; |
| 887 | 909 | ||
| 888 | /* Init network device structure */ | 910 | /* Init network device structure */ |
| @@ -926,6 +948,18 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
| 926 | goto nodev; | 948 | goto nodev; |
| 927 | } | 949 | } |
| 928 | 950 | ||
| 951 | /* Setup checksum offload, but default to off if not specified */ | ||
| 952 | lp->temac_features = 0; | ||
| 953 | p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); | ||
| 954 | if (p && be32_to_cpu(*p)) { | ||
| 955 | lp->temac_features |= TEMAC_FEATURE_TX_CSUM; | ||
| 956 | /* Can checksum TCP/UDP over IPv4. */ | ||
| 957 | ndev->features |= NETIF_F_IP_CSUM; | ||
| 958 | } | ||
| 959 | p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); | ||
| 960 | if (p && be32_to_cpu(*p)) | ||
| 961 | lp->temac_features |= TEMAC_FEATURE_RX_CSUM; | ||
| 962 | |||
| 929 | /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ | 963 | /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ |
| 930 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); | 964 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); |
| 931 | if (!np) { | 965 | if (!np) { |
| @@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
| 950 | 984 | ||
| 951 | lp->rx_irq = irq_of_parse_and_map(np, 0); | 985 | lp->rx_irq = irq_of_parse_and_map(np, 0); |
| 952 | lp->tx_irq = irq_of_parse_and_map(np, 1); | 986 | lp->tx_irq = irq_of_parse_and_map(np, 1); |
| 953 | if (!lp->rx_irq || !lp->tx_irq) { | 987 | if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { |
| 954 | dev_err(&op->dev, "could not determine irqs\n"); | 988 | dev_err(&op->dev, "could not determine irqs\n"); |
| 955 | rc = -ENOMEM; | 989 | rc = -ENOMEM; |
| 956 | goto nodev; | 990 | goto nodev; |
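For illustration only (not part of the patch): the TEMAC TX path above stops computing a pseudo-header checksum in software and instead hands the hardware two offsets — where checksumming starts (skb_transport_offset()) and where the result is inserted (start plus skb->csum_offset) — packed into the app1 descriptor word. A worked example for plain TCP over IPv4, assuming a 14-byte Ethernet header, a 20-byte IP header and no VLAN tag:

/* Worked example of the app1 encoding used above for TCP over IPv4;
 * the 14/20/16 byte figures are example values for this case only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned csum_start_off = 14 + 20;  /* skb_transport_offset(): TCP hdr */
        unsigned csum_offset    = 16;       /* offsetof(struct tcphdr, check) */
        unsigned csum_index_off = csum_start_off + csum_offset;
        uint32_t app1 = (csum_start_off << 16) | csum_index_off;

        /* start at byte 34, insert result at byte 50 -> app1 = 0x00220032 */
        printf("app1 = 0x%08x\n", app1);
        return 0;
}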
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 7aaae2d2bd67..80c11d131499 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
| @@ -130,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) | |||
| 130 | } | 130 | } |
| 131 | #endif | 131 | #endif |
| 132 | 132 | ||
| 133 | #ifdef CONFIG_ACPI_APEI | ||
| 134 | extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); | ||
| 135 | #else | ||
| 136 | static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) | ||
| 137 | { | ||
| 138 | if (pci_dev->__aer_firmware_first_valid) | ||
| 139 | return pci_dev->__aer_firmware_first; | ||
| 140 | return 0; | ||
| 141 | } | ||
| 142 | #endif | ||
| 143 | |||
| 144 | static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, | ||
| 145 | int enable) | ||
| 146 | { | ||
| 147 | pci_dev->__aer_firmware_first = !!enable; | ||
| 148 | pci_dev->__aer_firmware_first_valid = 1; | ||
| 149 | } | ||
| 133 | #endif /* _AERDRV_H_ */ | 150 | #endif /* _AERDRV_H_ */ |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 04814087658d..f278d7b0d95d 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
| 17 | #include <linux/pci-acpi.h> | 17 | #include <linux/pci-acpi.h> |
| 18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
| 19 | #include <acpi/apei.h> | ||
| 19 | #include "aerdrv.h" | 20 | #include "aerdrv.h" |
| 20 | 21 | ||
| 21 | /** | 22 | /** |
| @@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev) | |||
| 53 | 54 | ||
| 54 | return 0; | 55 | return 0; |
| 55 | } | 56 | } |
| 57 | |||
| 58 | #ifdef CONFIG_ACPI_APEI | ||
| 59 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, | ||
| 60 | struct pci_dev *pci) | ||
| 61 | { | ||
| 62 | return (0 == pci_domain_nr(pci->bus) && | ||
| 63 | p->bus == pci->bus->number && | ||
| 64 | p->device == PCI_SLOT(pci->devfn) && | ||
| 65 | p->function == PCI_FUNC(pci->devfn)); | ||
| 66 | } | ||
| 67 | |||
| 68 | struct aer_hest_parse_info { | ||
| 69 | struct pci_dev *pci_dev; | ||
| 70 | int firmware_first; | ||
| 71 | }; | ||
| 72 | |||
| 73 | static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) | ||
| 74 | { | ||
| 75 | struct aer_hest_parse_info *info = data; | ||
| 76 | struct acpi_hest_aer_common *p; | ||
| 77 | u8 pcie_type = 0; | ||
| 78 | u8 bridge = 0; | ||
| 79 | int ff = 0; | ||
| 80 | |||
| 81 | switch (hest_hdr->type) { | ||
| 82 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
| 83 | pcie_type = PCI_EXP_TYPE_ROOT_PORT; | ||
| 84 | break; | ||
| 85 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
| 86 | pcie_type = PCI_EXP_TYPE_ENDPOINT; | ||
| 87 | break; | ||
| 88 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
| 89 | if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) | ||
| 90 | bridge = 1; | ||
| 91 | break; | ||
| 92 | default: | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | ||
| 97 | if (p->flags & ACPI_HEST_GLOBAL) { | ||
| 98 | if ((info->pci_dev->is_pcie && | ||
| 99 | info->pci_dev->pcie_type == pcie_type) || bridge) | ||
| 100 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
| 101 | } else | ||
| 102 | if (hest_match_pci(p, info->pci_dev)) | ||
| 103 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
| 104 | info->firmware_first = ff; | ||
| 105 | |||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | |||
| 109 | static void aer_set_firmware_first(struct pci_dev *pci_dev) | ||
| 110 | { | ||
| 111 | int rc; | ||
| 112 | struct aer_hest_parse_info info = { | ||
| 113 | .pci_dev = pci_dev, | ||
| 114 | .firmware_first = 0, | ||
| 115 | }; | ||
| 116 | |||
| 117 | rc = apei_hest_parse(aer_hest_parse, &info); | ||
| 118 | |||
| 119 | if (rc) | ||
| 120 | pci_dev->__aer_firmware_first = 0; | ||
| 121 | else | ||
| 122 | pci_dev->__aer_firmware_first = info.firmware_first; | ||
| 123 | pci_dev->__aer_firmware_first_valid = 1; | ||
| 124 | } | ||
| 125 | |||
| 126 | int pcie_aer_get_firmware_first(struct pci_dev *dev) | ||
| 127 | { | ||
| 128 | if (!dev->__aer_firmware_first_valid) | ||
| 129 | aer_set_firmware_first(dev); | ||
| 130 | return dev->__aer_firmware_first; | ||
| 131 | } | ||
| 132 | #endif | ||
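For illustration only (not part of the patch): aer_set_firmware_first() above walks the HEST subtables once via apei_hest_parse() and caches the outcome in the two __aer_firmware_first* bits, so later pcie_aer_get_firmware_first() calls avoid re-parsing. A parse-once-and-cache sketch with placeholder types instead of the real ACPI/PCI structures:

/* Parse-once-and-cache sketch; the struct and parse_tables() are
 * placeholders, not kernel API. */
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
        unsigned int ff       : 1;   /* firmware-first result */
        unsigned int ff_valid : 1;   /* has the table walk been done? */
};

static bool parse_tables(void)       /* stand-in for the HEST walk */
{
        return true;                 /* pretend firmware owns AER */
}

static int get_firmware_first(struct dev_state *d)
{
        if (!d->ff_valid) {          /* walk the tables only once */
                d->ff = parse_tables();
                d->ff_valid = 1;
        }
        return d->ff;
}

int main(void)
{
        struct dev_state d = { 0, 0 };

        /* second call hits the cached bit, no re-parse */
        printf("%d %d\n", get_firmware_first(&d), get_firmware_first(&d));
        return 0;
}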
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index df2d686fe3dd..8af4f619bba2 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
| @@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) | |||
| 36 | u16 reg16 = 0; | 36 | u16 reg16 = 0; |
| 37 | int pos; | 37 | int pos; |
| 38 | 38 | ||
| 39 | if (dev->aer_firmware_first) | 39 | if (pcie_aer_get_firmware_first(dev)) |
| 40 | return -EIO; | 40 | return -EIO; |
| 41 | 41 | ||
| 42 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 42 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
| @@ -63,7 +63,7 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) | |||
| 63 | u16 reg16 = 0; | 63 | u16 reg16 = 0; |
| 64 | int pos; | 64 | int pos; |
| 65 | 65 | ||
| 66 | if (dev->aer_firmware_first) | 66 | if (pcie_aer_get_firmware_first(dev)) |
| 67 | return -EIO; | 67 | return -EIO; |
| 68 | 68 | ||
| 69 | pos = pci_pcie_cap(dev); | 69 | pos = pci_pcie_cap(dev); |
| @@ -771,7 +771,7 @@ void aer_isr(struct work_struct *work) | |||
| 771 | */ | 771 | */ |
| 772 | int aer_init(struct pcie_device *dev) | 772 | int aer_init(struct pcie_device *dev) |
| 773 | { | 773 | { |
| 774 | if (dev->port->aer_firmware_first) { | 774 | if (pcie_aer_get_firmware_first(dev->port)) { |
| 775 | dev_printk(KERN_DEBUG, &dev->device, | 775 | dev_printk(KERN_DEBUG, &dev->device, |
| 776 | "PCIe errors handled by platform firmware.\n"); | 776 | "PCIe errors handled by platform firmware.\n"); |
| 777 | goto out; | 777 | goto out; |
| @@ -785,7 +785,7 @@ out: | |||
| 785 | if (forceload) { | 785 | if (forceload) { |
| 786 | dev_printk(KERN_DEBUG, &dev->device, | 786 | dev_printk(KERN_DEBUG, &dev->device, |
| 787 | "aerdrv forceload requested.\n"); | 787 | "aerdrv forceload requested.\n"); |
| 788 | dev->port->aer_firmware_first = 0; | 788 | pcie_aer_force_firmware_first(dev->port, 0); |
| 789 | return 0; | 789 | return 0; |
| 790 | } | 790 | } |
| 791 | return -ENXIO; | 791 | return -ENXIO; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c82548afcd5c..f4adba2d1dd3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/cpumask.h> | 11 | #include <linux/cpumask.h> |
| 12 | #include <linux/pci-aspm.h> | 12 | #include <linux/pci-aspm.h> |
| 13 | #include <acpi/acpi_hest.h> | ||
| 14 | #include "pci.h" | 13 | #include "pci.h" |
| 15 | 14 | ||
| 16 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ | 15 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ |
| @@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) | |||
| 904 | pdev->is_hotplug_bridge = 1; | 903 | pdev->is_hotplug_bridge = 1; |
| 905 | } | 904 | } |
| 906 | 905 | ||
| 907 | static void set_pci_aer_firmware_first(struct pci_dev *pdev) | ||
| 908 | { | ||
| 909 | if (acpi_hest_firmware_first_pci(pdev)) | ||
| 910 | pdev->aer_firmware_first = 1; | ||
| 911 | } | ||
| 912 | |||
| 913 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 906 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
| 914 | 907 | ||
| 915 | /** | 908 | /** |
| @@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 939 | dev->multifunction = !!(hdr_type & 0x80); | 932 | dev->multifunction = !!(hdr_type & 0x80); |
| 940 | dev->error_state = pci_channel_io_normal; | 933 | dev->error_state = pci_channel_io_normal; |
| 941 | set_pcie_port_type(dev); | 934 | set_pcie_port_type(dev); |
| 942 | set_pci_aer_firmware_first(dev); | ||
| 943 | 935 | ||
| 944 | list_for_each_entry(slot, &dev->bus->slots, list) | 936 | list_for_each_entry(slot, &dev->bus->slots, list) |
| 945 | if (PCI_SLOT(dev->devfn) == slot->number) | 937 | if (PCI_SLOT(dev->devfn) == slot->number) |
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 566432106cc5..8070e074c739 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
| @@ -1124,7 +1124,7 @@ static void rio_update_route_tables(struct rio_mport *port) | |||
| 1124 | 1124 | ||
| 1125 | /** | 1125 | /** |
| 1126 | * rio_init_em - Initializes RIO Error Management (for switches) | 1126 | * rio_init_em - Initializes RIO Error Management (for switches) |
| 1127 | * @port: Master port associated with the RIO network | 1127 | * @rdev: RIO device |
| 1128 | * | 1128 | * |
| 1129 | * For each enumerated switch, call device-specific error management | 1129 | * For each enumerated switch, call device-specific error management |
| 1130 | * initialization routine (if supplied by the switch driver). | 1130 | * initialization routine (if supplied by the switch driver). |
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 777e099a3d8f..08fa453af974 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
| @@ -338,7 +338,7 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) | |||
| 338 | 338 | ||
| 339 | /** | 339 | /** |
| 340 | * rio_request_inb_pwrite - request inbound port-write message service | 340 | * rio_request_inb_pwrite - request inbound port-write message service |
| 341 | * @mport: RIO device to which register inbound port-write callback routine | 341 | * @rdev: RIO device to which to register inbound port-write callback routine |
| 342 | * @pwcback: Callback routine to execute when port-write is received | 342 | * @pwcback: Callback routine to execute when port-write is received |
| 343 | * | 343 | * |
| 344 | * Binds a port-write callback function to the RapidIO device. | 344 | * Binds a port-write callback function to the RapidIO device. |
| @@ -385,7 +385,10 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); | |||
| 385 | /** | 385 | /** |
| 386 | * rio_mport_get_physefb - Helper function that returns register offset | 386 | * rio_mport_get_physefb - Helper function that returns register offset |
| 387 | * for Physical Layer Extended Features Block. | 387 | * for Physical Layer Extended Features Block. |
| 388 | * @rdev: RIO device | 388 | * @port: Master port to issue transaction |
| 389 | * @local: Indicate a local master port or remote device access | ||
| 390 | * @destid: Destination ID of the device | ||
| 391 | * @hopcount: Number of switch hops to the device | ||
| 389 | */ | 392 | */ |
| 390 | u32 | 393 | u32 |
| 391 | rio_mport_get_physefb(struct rio_mport *port, int local, | 394 | rio_mport_get_physefb(struct rio_mport *port, int local, |
| @@ -430,7 +433,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local, | |||
| 430 | 433 | ||
| 431 | /** | 434 | /** |
| 432 | * rio_get_comptag - Begin or continue searching for a RIO device by component tag | 435 | * rio_get_comptag - Begin or continue searching for a RIO device by component tag |
| 433 | * @comp_tag: RIO component tad to match | 436 | * @comp_tag: RIO component tag to match |
| 434 | * @from: Previous RIO device found in search, or %NULL for new search | 437 | * @from: Previous RIO device found in search, or %NULL for new search |
| 435 | * | 438 | * |
| 436 | * Iterates through the list of known RIO devices. If a RIO device is | 439 | * Iterates through the list of known RIO devices. If a RIO device is |
| @@ -835,7 +838,6 @@ int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
| 835 | * rio_std_route_clr_table - Clear switch route table using standard registers | 838 | * rio_std_route_clr_table - Clear switch route table using standard registers |
| 836 | * defined in RIO specification rev.1.3. | 839 | * defined in RIO specification rev.1.3. |
| 837 | * @mport: Master port to issue transaction | 840 | * @mport: Master port to issue transaction |
| 838 | * @local: Indicate a local master port or remote device access | ||
| 839 | * @destid: Destination ID of the device | 841 | * @destid: Destination ID of the device |
| 840 | * @hopcount: Number of switch hops to the device | 842 | * @hopcount: Number of switch hops to the device |
| 841 | * @table: routing table ID (global or port-specific) | 843 | * @table: routing table ID (global or port-specific) |
diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c index 8dc03837617b..4a789e5361a4 100644 --- a/drivers/serial/s5pv210.c +++ b/drivers/serial/s5pv210.c | |||
| @@ -119,7 +119,7 @@ static int s5p_serial_probe(struct platform_device *pdev) | |||
| 119 | return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]); | 119 | return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | static struct platform_driver s5p_serial_drv = { | 122 | static struct platform_driver s5p_serial_driver = { |
| 123 | .probe = s5p_serial_probe, | 123 | .probe = s5p_serial_probe, |
| 124 | .remove = __devexit_p(s3c24xx_serial_remove), | 124 | .remove = __devexit_p(s3c24xx_serial_remove), |
| 125 | .driver = { | 125 | .driver = { |
| @@ -130,19 +130,19 @@ static struct platform_driver s5p_serial_drv = { | |||
| 130 | 130 | ||
| 131 | static int __init s5pv210_serial_console_init(void) | 131 | static int __init s5pv210_serial_console_init(void) |
| 132 | { | 132 | { |
| 133 | return s3c24xx_serial_initconsole(&s5p_serial_drv, s5p_uart_inf); | 133 | return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | console_initcall(s5pv210_serial_console_init); | 136 | console_initcall(s5pv210_serial_console_init); |
| 137 | 137 | ||
| 138 | static int __init s5p_serial_init(void) | 138 | static int __init s5p_serial_init(void) |
| 139 | { | 139 | { |
| 140 | return s3c24xx_serial_init(&s5p_serial_drv, *s5p_uart_inf); | 140 | return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf); |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | static void __exit s5p_serial_exit(void) | 143 | static void __exit s5p_serial_exit(void) |
| 144 | { | 144 | { |
| 145 | platform_driver_unregister(&s5p_serial_drv); | 145 | platform_driver_unregister(&s5p_serial_driver); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | module_init(s5p_serial_init); | 148 | module_init(s5p_serial_init); |
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 9286e863b0e7..643b413d9f0f 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 30 | #include <linux/statfs.h> | 30 | #include <linux/statfs.h> |
| 31 | #include <linux/writeback.h> | 31 | #include <linux/writeback.h> |
| 32 | #include <linux/quotaops.h> | ||
| 33 | 32 | ||
| 34 | #include "netfs.h" | 33 | #include "netfs.h" |
| 35 | 34 | ||
| @@ -880,7 +879,7 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb) | |||
| 880 | /* | 879 | /* |
| 881 | * We want fsync() to work on POHMELFS. | 880 | * We want fsync() to work on POHMELFS. |
| 882 | */ | 881 | */ |
| 883 | static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync) | 882 | static int pohmelfs_fsync(struct file *file, int datasync) |
| 884 | { | 883 | { |
| 885 | struct inode *inode = file->f_mapping->host; | 884 | struct inode *inode = file->f_mapping->host; |
| 886 | struct writeback_control wbc = { | 885 | struct writeback_control wbc = { |
| @@ -969,13 +968,6 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr) | |||
| 969 | goto err_out_exit; | 968 | goto err_out_exit; |
| 970 | } | 969 | } |
| 971 | 970 | ||
| 972 | if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || | ||
| 973 | (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { | ||
| 974 | err = dquot_transfer(inode, attr); | ||
| 975 | if (err) | ||
| 976 | goto err_out_exit; | ||
| 977 | } | ||
| 978 | |||
| 979 | err = inode_setattr(inode, attr); | 971 | err = inode_setattr(inode, attr); |
| 980 | if (err) { | 972 | if (err) { |
| 981 | dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); | 973 | dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); |
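The pohmelfs_fsync() change above follows a tree-wide update of the ->fsync() file operation: the unused dentry argument is dropped and the inode is reached through the struct file instead. A minimal sketch of the new two-argument prototype for a hypothetical filesystem follows; the examplefs_* names are invented, and a real implementation would also sync metadata and honour datasync.

#include <linux/fs.h>

/* Hypothetical ->fsync() using the new two-argument prototype. */
static int examplefs_fsync(struct file *file, int datasync)
{
	/* No dentry argument any more; get the inode via the file. */
	struct inode *inode = file->f_mapping->host;

	/* Write back and wait on this file's dirty pages;
	 * datasync is ignored in this sketch. */
	return filemap_write_and_wait(inode->i_mapping);
}

static const struct file_operations examplefs_fops = {
	.fsync	= examplefs_fsync,
};
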
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 6b8bf8c781c4..43abf55d8c60 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c | |||
| @@ -794,7 +794,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 794 | } | 794 | } |
| 795 | 795 | ||
| 796 | static int | 796 | static int |
| 797 | printer_fsync(struct file *fd, struct dentry *dentry, int datasync) | 797 | printer_fsync(struct file *fd, int datasync) |
| 798 | { | 798 | { |
| 799 | struct printer_dev *dev = fd->private_data; | 799 | struct printer_dev *dev = fd->private_data; |
| 800 | unsigned long flags; | 800 | unsigned long flags; |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index aa88911c9504..0f41c9195e9b 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, | |||
| 593 | int r; | 593 | int r; |
| 594 | switch (ioctl) { | 594 | switch (ioctl) { |
| 595 | case VHOST_NET_SET_BACKEND: | 595 | case VHOST_NET_SET_BACKEND: |
| 596 | r = copy_from_user(&backend, argp, sizeof backend); | 596 | if (copy_from_user(&backend, argp, sizeof backend)) |
| 597 | if (r < 0) | 597 | return -EFAULT; |
| 598 | return r; | ||
| 599 | return vhost_net_set_backend(n, backend.index, backend.fd); | 598 | return vhost_net_set_backend(n, backend.index, backend.fd); |
| 600 | case VHOST_GET_FEATURES: | 599 | case VHOST_GET_FEATURES: |
| 601 | features = VHOST_FEATURES; | 600 | features = VHOST_FEATURES; |
| 602 | return copy_to_user(featurep, &features, sizeof features); | 601 | if (copy_to_user(featurep, &features, sizeof features)) |
| 602 | return -EFAULT; | ||
| 603 | return 0; | ||
| 603 | case VHOST_SET_FEATURES: | 604 | case VHOST_SET_FEATURES: |
| 604 | r = copy_from_user(&features, featurep, sizeof features); | 605 | if (copy_from_user(&features, featurep, sizeof features)) |
| 605 | if (r < 0) | 606 | return -EFAULT; |
| 606 | return r; | ||
| 607 | if (features & ~VHOST_FEATURES) | 607 | if (features & ~VHOST_FEATURES) |
| 608 | return -EOPNOTSUPP; | 608 | return -EOPNOTSUPP; |
| 609 | return vhost_net_set_features(n, features); | 609 | return vhost_net_set_features(n, features); |
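The reason for the conversions above is the return convention of copy_from_user()/copy_to_user(): they return the number of bytes that could not be copied, an unsigned count that is never negative, so the old "if (r < 0)" tests were dead code and VHOST_GET_FEATURES could even hand a positive byte count back to userspace as the ioctl result. Any non-zero return has to be treated as -EFAULT. A generic sketch of the corrected pattern, with the demo_* names invented for illustration:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl payload, used only to show the pattern. */
struct demo_config {
	u32 index;
	u32 flags;
};

static long demo_set_config(void __user *argp)
{
	struct demo_config cfg;

	/*
	 * copy_from_user() returns the number of uncopied bytes, never a
	 * negative errno: any non-zero result means the copy failed.
	 */
	if (copy_from_user(&cfg, argp, sizeof(cfg)))
		return -EFAULT;

	/* ... validate and apply cfg here ... */
	return 0;
}
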
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index c6fb8e968f21..3b83382e06eb 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | |||
| 320 | { | 320 | { |
| 321 | struct vhost_memory mem, *newmem, *oldmem; | 321 | struct vhost_memory mem, *newmem, *oldmem; |
| 322 | unsigned long size = offsetof(struct vhost_memory, regions); | 322 | unsigned long size = offsetof(struct vhost_memory, regions); |
| 323 | long r; | 323 | if (copy_from_user(&mem, m, size)) |
| 324 | r = copy_from_user(&mem, m, size); | 324 | return -EFAULT; |
| 325 | if (r) | ||
| 326 | return r; | ||
| 327 | if (mem.padding) | 325 | if (mem.padding) |
| 328 | return -EOPNOTSUPP; | 326 | return -EOPNOTSUPP; |
| 329 | if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) | 327 | if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) |
| @@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | |||
| 333 | return -ENOMEM; | 331 | return -ENOMEM; |
| 334 | 332 | ||
| 335 | memcpy(newmem, &mem, size); | 333 | memcpy(newmem, &mem, size); |
| 336 | r = copy_from_user(newmem->regions, m->regions, | 334 | if (copy_from_user(newmem->regions, m->regions, |
| 337 | mem.nregions * sizeof *m->regions); | 335 | mem.nregions * sizeof *m->regions)) { |
| 338 | if (r) { | ||
| 339 | kfree(newmem); | 336 | kfree(newmem); |
| 340 | return r; | 337 | return -EFAULT; |
| 341 | } | 338 | } |
| 342 | 339 | ||
| 343 | if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) | 340 | if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { |
| 341 | kfree(newmem); | ||
| 344 | return -EFAULT; | 342 | return -EFAULT; |
| 343 | } | ||
| 345 | oldmem = d->memory; | 344 | oldmem = d->memory; |
| 346 | rcu_assign_pointer(d->memory, newmem); | 345 | rcu_assign_pointer(d->memory, newmem); |
| 347 | synchronize_rcu(); | 346 | synchronize_rcu(); |
| @@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 374 | r = get_user(idx, idxp); | 373 | r = get_user(idx, idxp); |
| 375 | if (r < 0) | 374 | if (r < 0) |
| 376 | return r; | 375 | return r; |
| 377 | if (idx > d->nvqs) | 376 | if (idx >= d->nvqs) |
| 378 | return -ENOBUFS; | 377 | return -ENOBUFS; |
| 379 | 378 | ||
| 380 | vq = d->vqs + idx; | 379 | vq = d->vqs + idx; |
| @@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 389 | r = -EBUSY; | 388 | r = -EBUSY; |
| 390 | break; | 389 | break; |
| 391 | } | 390 | } |
| 392 | r = copy_from_user(&s, argp, sizeof s); | 391 | if (copy_from_user(&s, argp, sizeof s)) { |
| 393 | if (r < 0) | 392 | r = -EFAULT; |
| 394 | break; | 393 | break; |
| 394 | } | ||
| 395 | if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { | 395 | if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { |
| 396 | r = -EINVAL; | 396 | r = -EINVAL; |
| 397 | break; | 397 | break; |
| @@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 405 | r = -EBUSY; | 405 | r = -EBUSY; |
| 406 | break; | 406 | break; |
| 407 | } | 407 | } |
| 408 | r = copy_from_user(&s, argp, sizeof s); | 408 | if (copy_from_user(&s, argp, sizeof s)) { |
| 409 | if (r < 0) | 409 | r = -EFAULT; |
| 410 | break; | 410 | break; |
| 411 | } | ||
| 411 | if (s.num > 0xffff) { | 412 | if (s.num > 0xffff) { |
| 412 | r = -EINVAL; | 413 | r = -EINVAL; |
| 413 | break; | 414 | break; |
| @@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 419 | case VHOST_GET_VRING_BASE: | 420 | case VHOST_GET_VRING_BASE: |
| 420 | s.index = idx; | 421 | s.index = idx; |
| 421 | s.num = vq->last_avail_idx; | 422 | s.num = vq->last_avail_idx; |
| 422 | r = copy_to_user(argp, &s, sizeof s); | 423 | if (copy_to_user(argp, &s, sizeof s)) |
| 424 | r = -EFAULT; | ||
| 423 | break; | 425 | break; |
| 424 | case VHOST_SET_VRING_ADDR: | 426 | case VHOST_SET_VRING_ADDR: |
| 425 | r = copy_from_user(&a, argp, sizeof a); | 427 | if (copy_from_user(&a, argp, sizeof a)) { |
| 426 | if (r < 0) | 428 | r = -EFAULT; |
| 427 | break; | 429 | break; |
| 430 | } | ||
| 428 | if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { | 431 | if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { |
| 429 | r = -EOPNOTSUPP; | 432 | r = -EOPNOTSUPP; |
| 430 | break; | 433 | break; |
| @@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 477 | vq->used = (void __user *)(unsigned long)a.used_user_addr; | 480 | vq->used = (void __user *)(unsigned long)a.used_user_addr; |
| 478 | break; | 481 | break; |
| 479 | case VHOST_SET_VRING_KICK: | 482 | case VHOST_SET_VRING_KICK: |
| 480 | r = copy_from_user(&f, argp, sizeof f); | 483 | if (copy_from_user(&f, argp, sizeof f)) { |
| 481 | if (r < 0) | 484 | r = -EFAULT; |
| 482 | break; | 485 | break; |
| 486 | } | ||
| 483 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); | 487 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); |
| 484 | if (IS_ERR(eventfp)) { | 488 | if (IS_ERR(eventfp)) { |
| 485 | r = PTR_ERR(eventfp); | 489 | r = PTR_ERR(eventfp); |
| @@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 492 | filep = eventfp; | 496 | filep = eventfp; |
| 493 | break; | 497 | break; |
| 494 | case VHOST_SET_VRING_CALL: | 498 | case VHOST_SET_VRING_CALL: |
| 495 | r = copy_from_user(&f, argp, sizeof f); | 499 | if (copy_from_user(&f, argp, sizeof f)) { |
| 496 | if (r < 0) | 500 | r = -EFAULT; |
| 497 | break; | 501 | break; |
| 502 | } | ||
| 498 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); | 503 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); |
| 499 | if (IS_ERR(eventfp)) { | 504 | if (IS_ERR(eventfp)) { |
| 500 | r = PTR_ERR(eventfp); | 505 | r = PTR_ERR(eventfp); |
| @@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 510 | filep = eventfp; | 515 | filep = eventfp; |
| 511 | break; | 516 | break; |
| 512 | case VHOST_SET_VRING_ERR: | 517 | case VHOST_SET_VRING_ERR: |
| 513 | r = copy_from_user(&f, argp, sizeof f); | 518 | if (copy_from_user(&f, argp, sizeof f)) { |
| 514 | if (r < 0) | 519 | r = -EFAULT; |
| 515 | break; | 520 | break; |
| 521 | } | ||
| 516 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); | 522 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); |
| 517 | if (IS_ERR(eventfp)) { | 523 | if (IS_ERR(eventfp)) { |
| 518 | r = PTR_ERR(eventfp); | 524 | r = PTR_ERR(eventfp); |
| @@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) | |||
| 575 | r = vhost_set_memory(d, argp); | 581 | r = vhost_set_memory(d, argp); |
| 576 | break; | 582 | break; |
| 577 | case VHOST_SET_LOG_BASE: | 583 | case VHOST_SET_LOG_BASE: |
| 578 | r = copy_from_user(&p, argp, sizeof p); | 584 | if (copy_from_user(&p, argp, sizeof p)) { |
| 579 | if (r < 0) | 585 | r = -EFAULT; |
| 580 | break; | 586 | break; |
| 587 | } | ||
| 581 | if ((u64)(unsigned long)p != p) { | 588 | if ((u64)(unsigned long)p != p) { |
| 582 | r = -EFAULT; | 589 | r = -EFAULT; |
| 583 | break; | 590 | break; |
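Beyond the -EFAULT conversions, the vhost_set_memory() hunk above shows the usual publish-then-reclaim idiom for an RCU-protected table: validate the new copy first (freeing it on any failure, which is what the added kfree(newmem) restores), publish it with rcu_assign_pointer(), wait out pre-existing readers with synchronize_rcu(), and only then free the old copy. The same series also tightens the vring index check to idx >= d->nvqs, since d->vqs holds exactly nvqs entries. A generic sketch of the RCU swap, with the demo_* names invented and the update-side lock assumed to be held by the caller:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-managed table; readers access it under rcu_read_lock(). */
struct demo_table {
	int nentries;
	/* ... entries ... */
};

static struct demo_table *demo_current;

/* Single updater, assumed to hold the update-side mutex. */
static int demo_replace_table(struct demo_table *newtab)
{
	struct demo_table *oldtab = demo_current;

	if (newtab->nentries < 0) {	/* stand-in for real validation */
		kfree(newtab);		/* do not leak the rejected copy */
		return -EINVAL;
	}

	rcu_assign_pointer(demo_current, newtab);	/* publish the new table */
	synchronize_rcu();	/* wait for readers of the old table */
	kfree(oldtab);		/* safe: no reader can still see it */
	return 0;
}
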
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 1105a591dcc1..073c9b408cf7 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c | |||
| @@ -66,7 +66,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, | |||
| 66 | return 0; | 66 | return 0; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) | 69 | int fb_deferred_io_fsync(struct file *file, int datasync) |
| 70 | { | 70 | { |
| 71 | struct fb_info *info = file->private_data; | 71 | struct fb_info *info = file->private_data; |
| 72 | 72 | ||
