Diffstat (limited to 'drivers')
335 files changed, 6379 insertions, 3771 deletions
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 67340cc70142..257706e7734f 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -70,6 +70,12 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
 
 	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
 
+	/* If Source and Target are the same, just return */
+
+	if (source_desc == target_desc) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* We know that source_desc is a buffer by now */
 
 	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
@@ -161,6 +167,12 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
 
 	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
 
+	/* If Source and Target are the same, just return */
+
+	if (source_desc == target_desc) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* We know that source_desc is a string by now */
 
 	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f6baa77deefb..0c4ca4d318b3 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -78,9 +78,10 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
 
 static int __init blacklist_by_year(void)
 {
-	int year = dmi_get_year(DMI_BIOS_DATE);
+	int year;
+
 	/* Doesn't exist? Likely an old system */
-	if (year == -1) {
+	if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
 		printk(KERN_ERR PREFIX "no DMI BIOS year, "
 		       "acpi=force is required to enable ACPI\n" );
 		return 1;
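
Note (not part of the patch): the hunk above replaces dmi_get_year(), which returned -1 when the BIOS date was unavailable, with dmi_get_date(), which returns false in that case and fills only the non-NULL out-parameters. A minimal sketch of the newer calling convention, assuming the 2009-era prototype bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp):

	int year;

	/* Illustration only: year is written only when the helper succeeds. */
	if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL))
		printk(KERN_INFO "DMI BIOS date not available\n");
	else
		printk(KERN_INFO "BIOS year: %d\n", year);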
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 84e0f3c07442..2cc4b3033872 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void)
 {
 	int result = 0;
 
+	if (acpi_disabled)
+		return 0;
+
 	memset(&errata, 0, sizeof(errata));
 
 #ifdef CONFIG_SMP
@@ -1197,6 +1200,9 @@ out_proc:
 
 static void __exit acpi_processor_exit(void)
 {
+	if (acpi_disabled)
+		return;
+
 	acpi_processor_ppc_exit();
 
 	acpi_thermal_cpufreq_exit();
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0efa59e7e3af..66393d5c4c7c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -162,8 +162,9 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 		pr->power.timer_broadcast_on_state = state;
 }
 
-static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(void *arg)
 {
+	struct acpi_processor *pr = (struct acpi_processor *) arg;
 	unsigned long reason;
 
 	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
@@ -635,7 +636,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 			working++;
 	}
 
-	lapic_timer_propagate_broadcast(pr);
+	smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
+				 pr, 1);
 
 	return (working);
 }
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 39838c666032..31adda1099e0 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
 		if (pr->limit.thermal.tx > tx)
 			tx = pr->limit.thermal.tx;
 
-		result = acpi_processor_set_throttling(pr, tx);
+		result = acpi_processor_set_throttling(pr, tx, false);
 		if (result)
 			goto end;
 	}
@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
 
 	if (state <= max_pstate) {
 		if (pr->flags.throttling && pr->throttling.state)
-			result = acpi_processor_set_throttling(pr, 0);
+			result = acpi_processor_set_throttling(pr, 0, false);
 		cpufreq_set_cur_state(pr->id, state);
 	} else {
 		cpufreq_set_cur_state(pr->id, max_pstate);
 		result = acpi_processor_set_throttling(pr,
-				state - max_pstate);
+				state - max_pstate, false);
 	}
 	return result;
 }
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 227543789ba9..ae39797aab55 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,7 +62,8 @@ struct throttling_tstate {
 #define THROTTLING_POSTCHANGE      (2)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+				  int state, bool force);
 
 static int acpi_processor_update_tsd_coord(void)
 {
@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
 		 */
 		target_state = throttling_limit;
 	}
-	return acpi_processor_set_throttling(pr, target_state);
+	return acpi_processor_set_throttling(pr, target_state, false);
 }
 
 /*
@@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
 		if (state == -1) {
-			ACPI_WARNING((AE_INFO,
-				"Invalid throttling state, reset"));
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"Invalid throttling state, reset\n"));
 			state = 0;
-			ret = acpi_processor_set_throttling(pr, state);
+			ret = acpi_processor_set_throttling(pr, state, true);
 			if (ret)
 				return ret;
 		}
@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
 }
 
 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
-					      int state)
+					      int state, bool force)
 {
 	u32 value = 0;
 	u32 duty_mask = 0;
@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (state == pr->throttling.state)
+	if (!force && (state == pr->throttling.state))
 		return 0;
 
 	if (state < pr->throttling_platform_limit)
@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 }
 
 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
-					     int state)
+					     int state, bool force)
 {
 	int ret;
 	acpi_integer value;
@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (state == pr->throttling.state)
+	if (!force && (state == pr->throttling.state))
 		return 0;
 
 	if (state < pr->throttling_platform_limit)
@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+				  int state, bool force)
 {
 	cpumask_var_t saved_mask;
 	int ret = 0;
@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		/* FIXME: use work_on_cpu() */
 		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
-						t_state.target_state);
+						t_state.target_state, force);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
-				match_pr, t_state.target_state);
+				match_pr, t_state.target_state, force);
 		}
 	}
 	/*
@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Disabling throttling (was T%d)\n",
 			pr->throttling.state));
-		result = acpi_processor_set_throttling(pr, 0);
+		result = acpi_processor_set_throttling(pr, 0, false);
 		if (result)
 			goto end;
 	}
@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
 	if (strcmp(tmpbuf, charp) != 0)
 		return -EINVAL;
 
-	result = acpi_processor_set_throttling(pr, state_val);
+	result = acpi_processor_set_throttling(pr, state_val, false);
 	if (result)
 		return result;
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8851315ce858..60ea984c84a0 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2004,8 +2004,11 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
 	status = acpi_remove_notify_handler(device->dev->handle,
 					    ACPI_DEVICE_NOTIFY,
 					    acpi_video_device_notify);
-	sysfs_remove_link(&device->backlight->dev.kobj, "device");
-	backlight_device_unregister(device->backlight);
+	if (device->backlight) {
+		sysfs_remove_link(&device->backlight->dev.kobj, "device");
+		backlight_device_unregister(device->backlight);
+		device->backlight = NULL;
+	}
 	if (device->cdev) {
 		sysfs_remove_link(&device->dev->dev.kobj,
 				  "thermal_cooling");
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b17c57f85032..ab2fa4eeb364 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -26,6 +26,17 @@ config ATA_NONSTANDARD
 	bool
 	default n
 
+config ATA_VERBOSE_ERROR
+	bool "Verbose ATA error reporting"
+	default y
+	help
+	  This option adds parsing of ATA command descriptions and error bits
+	  in libata kernel output, making it easier to interpret.
+	  This option will enlarge the kernel by approx. 6KB. Disable it only
+	  if kernel size is more important than ease of debugging.
+
+	  If unsure, say Y.
+
 config ATA_ACPI
 	bool "ATA ACPI Support"
 	depends on ACPI && PCI
@@ -586,6 +597,16 @@ config PATA_RB532
 
 	  If unsure, say N.
 
+config PATA_RDC
+	tristate "RDC PATA support"
+	depends on PCI
+	help
+	  This option enables basic support for the later RDC PATA controllers
+	  controllers via the new ATA layer. For the RDC 1010, you need to
+	  enable the IT821X driver instead.
+
+	  If unsure, say N.
+
 config PATA_RZ1000
 	tristate "PC Tech RZ1000 PATA support"
 	depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 38906f9bbb4e..463eb52236aa 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_PATA_PDC_OLD)	+= pata_pdc202xx_old.o
 obj-$(CONFIG_PATA_QDI)		+= pata_qdi.o
 obj-$(CONFIG_PATA_RADISYS)	+= pata_radisys.o
 obj-$(CONFIG_PATA_RB532)	+= pata_rb532_cf.o
+obj-$(CONFIG_PATA_RDC)		+= pata_rdc.o
 obj-$(CONFIG_PATA_RZ1000)	+= pata_rz1000.o
 obj-$(CONFIG_PATA_SC1200)	+= pata_sc1200.o
 obj-$(CONFIG_PATA_SERVERWORKS)	+= pata_serverworks.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 958c1fa41900..d4cd9c203314 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -219,6 +219,8 @@ enum {
 	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
 	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
 	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */
+	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= (1 << 11), /* treat SRST timeout as
+							link offline */
 
 	/* ap->flags bits */
 
@@ -327,10 +329,24 @@ static ssize_t ahci_activity_store(struct ata_device *dev,
 				   enum sw_activity val);
 static void ahci_init_sw_activity(struct ata_link *link);
 
+static ssize_t ahci_show_host_caps(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+				      struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+				  struct device_attribute *attr, char *buf);
+
+DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+
 static struct device_attribute *ahci_shost_attrs[] = {
 	&dev_attr_link_power_management_policy,
 	&dev_attr_em_message_type,
 	&dev_attr_em_message,
+	&dev_attr_ahci_host_caps,
+	&dev_attr_ahci_host_version,
+	&dev_attr_ahci_port_cmd,
 	NULL
 };
 
@@ -537,6 +553,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
 
+	/* AMD */
+	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
+	/* AMD is using RAID class only for ahci controllers */
+	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+
 	/* VIA */
 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -700,6 +722,36 @@ static void ahci_enable_ahci(void __iomem *mmio)
 	WARN_ON(1);
 }
 
+static ssize_t ahci_show_host_caps(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+
+	return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	void __iomem *port_mmio = ahci_port_base(ap);
+
+	return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
 /**
  *	ahci_save_initial_config - Save and fixup initial config values
  *	@pdev: target PCI device
@@ -1582,7 +1634,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_kick_engine(struct ata_port *ap, int force_restart)
+static int ahci_kick_engine(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1590,18 +1642,16 @@ static int ahci_kick_engine(struct ata_port *ap, int force_restart)
 	u32 tmp;
 	int busy, rc;
 
-	/* do we need to kick the port? */
-	busy = status & (ATA_BUSY | ATA_DRQ);
-	if (!busy && !force_restart)
-		return 0;
-
 	/* stop engine */
 	rc = ahci_stop_engine(ap);
 	if (rc)
 		goto out_restart;
 
-	/* need to do CLO? */
-	if (!busy) {
+	/* need to do CLO?
+	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
+	 */
+	busy = status & (ATA_BUSY | ATA_DRQ);
+	if (!busy && !sata_pmp_attached(ap)) {
 		rc = 0;
 		goto out_restart;
 	}
@@ -1649,7 +1699,7 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
 					1, timeout_msec);
 		if (tmp & 0x1) {
-			ahci_kick_engine(ap, 1);
+			ahci_kick_engine(ap);
 			return -EBUSY;
 		}
 	} else
@@ -1663,6 +1713,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
 			     int (*check_ready)(struct ata_link *link))
 {
 	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 	const char *reason = NULL;
 	unsigned long now, msecs;
 	struct ata_taskfile tf;
@@ -1671,7 +1722,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
 	DPRINTK("ENTER\n");
 
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
-	rc = ahci_kick_engine(ap, 1);
+	rc = ahci_kick_engine(ap);
 	if (rc && rc != -EOPNOTSUPP)
 		ata_link_printk(link, KERN_WARNING,
 				"failed to reset engine (errno=%d)\n", rc);
@@ -1701,12 +1752,21 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
 
 	/* wait for link to become ready */
 	rc = ata_wait_after_reset(link, deadline, check_ready);
-	/* link occupied, -ENODEV too is an error */
-	if (rc) {
+	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+		/*
+		 * Workaround for cases where link online status can't
+		 * be trusted.  Treat device readiness timeout as link
+		 * offline.
+		 */
+		ata_link_printk(link, KERN_INFO,
+				"device not ready, treating as offline\n");
+		*class = ATA_DEV_NONE;
+	} else if (rc) {
+		/* link occupied, -ENODEV too is an error */
 		reason = "device not ready";
 		goto fail;
-	}
+	} else
 	*class = ahci_dev_classify(ap);
 
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
@@ -1773,7 +1833,8 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
 	irq_sts = readl(port_mmio + PORT_IRQ_STAT);
 	if (irq_sts & PORT_IRQ_BAD_PMP) {
 		ata_link_printk(link, KERN_WARNING,
-				"failed due to HW bug, retry pmp=0\n");
+				"applying SB600 PMP SRST workaround "
+				"and retrying\n");
 		rc = ahci_do_softreset(link, class, 0, deadline,
 				       ahci_check_ready);
 	}
@@ -1877,7 +1938,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
 		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
 					  ahci_check_ready);
 		if (rc)
-			ahci_kick_engine(ap, 0);
+			ahci_kick_engine(ap);
 	}
 	return rc;
 }
@@ -2258,7 +2319,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 
 	/* make DMA engine forget about the failed command */
 	if (qc->flags & ATA_QCFLAG_FAILED)
-		ahci_kick_engine(ap, 1);
+		ahci_kick_engine(ap);
 }
 
 static void ahci_pmp_attach(struct ata_port *ap)
@@ -2590,14 +2651,18 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
 }
 
 /*
- * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
- * BIOS.  The oldest version known to be broken is 0901 and working is
- * 1501 which was released on 2007-10-26.  Force 32bit DMA on anything
- * older than 1501.  Please read bko#9412 for more info.
+ * SB600 ahci controller on certain boards can't do 64bit DMA with
+ * older BIOS.
  */
-static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
+static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
 {
 	static const struct dmi_system_id sysids[] = {
+		/*
+		 * The oldest version known to be broken is 0901 and
+		 * working is 1501 which was released on 2007-10-26.
+		 * Force 32bit DMA on anything older than 1501.
+		 * Please read bko#9412 for more info.
+		 */
 		{
 			.ident = "ASUS M2A-VM",
 			.matches = {
@@ -2605,31 +2670,48 @@ static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
 					  "ASUSTeK Computer INC."),
 				DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
 			},
+			.driver_data = "20071026",	/* yyyymmdd */
+		},
+		/*
+		 * It's yet unknown whether more recent BIOS fixes the
+		 * problem.  Blacklist the whole board for the time
+		 * being.  Please read the following thread for more
+		 * info.
+		 *
+		 * http://thread.gmane.org/gmane.linux.ide/42326
+		 */
+		{
+			.ident = "Gigabyte GA-MA69VM-S2",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "Gigabyte Technology Co., Ltd."),
+				DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"),
+			},
 		},
 		{ }
 	};
-	const char *cutoff_mmdd = "10/26";
-	const char *date;
-	int year;
+	const struct dmi_system_id *match;
 
+	match = dmi_first_match(sysids);
 	if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
-	    !dmi_check_system(sysids))
+	    !match)
 		return false;
 
-	/*
-	 * Argh.... both version and date are free form strings.
-	 * Let's hope they're using the same date format across
-	 * different versions.
-	 */
-	date = dmi_get_system_info(DMI_BIOS_DATE);
-	year = dmi_get_year(DMI_BIOS_DATE);
-	if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
-	    (year > 2007 ||
-	     (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
-		return false;
+	if (match->driver_data) {
+		int year, month, date;
+		char buf[9];
+
+		dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+		snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
 
-	dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
-		   "forcing 32bit DMA, update BIOS\n");
+		if (strcmp(buf, match->driver_data) >= 0)
+			return false;
+
+		dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
+			   "forcing 32bit DMA, update BIOS\n", match->ident);
+	} else
+		dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't "
+			   "do 64bit DMA, forcing 32bit\n", match->ident);
 
 	return true;
 }
@@ -2726,6 +2808,56 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
 	return !ver || strcmp(ver, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_online(struct pci_dev *pdev)
+{
+#define ENCODE_BUSDEVFN(bus, slot, func)			\
+	(void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * There are several gigabyte boards which use
+		 * SIMG5723s configured as hardware RAID.  Certain
+		 * 5723 firmware revisions shipped there keep the link
+		 * online but fail to answer properly to SRST or
+		 * IDENTIFY when no device is attached downstream
+		 * causing libata to retry quite a few times leading
+		 * to excessive detection delay.
+		 *
+		 * As these firmwares respond to the second reset try
+		 * with invalid device signature, considering unknown
+		 * sig as offline works around the problem acceptably.
+		 */
+		{
+			.ident = "EP45-DQ6",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "Gigabyte Technology Co., Ltd."),
+				DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
+			},
+			.driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
+		},
+		{
+			.ident = "EP45-DS5",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "Gigabyte Technology Co., Ltd."),
+				DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
+			},
+			.driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
+		},
+		{ }	/* terminate list */
+	};
+#undef ENCODE_BUSDEVFN
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	unsigned int val;
+
+	if (!dmi)
+		return false;
+
+	val = (unsigned long)dmi->driver_data;
+
+	return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -2794,8 +2926,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
 		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
 
-	/* apply ASUS M2A_VM quirk */
-	if (ahci_asus_m2a_vm_32bit_only(pdev))
+	/* apply sb600 32bit only quirk */
+	if (ahci_sb600_32bit_only(pdev))
 		hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
 
 	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
@@ -2806,7 +2938,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* prepare host */
 	if (hpriv->cap & HOST_CAP_NCQ)
-		pi.flags |= ATA_FLAG_NCQ;
+		pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
 
 	if (hpriv->cap & HOST_CAP_PMP)
 		pi.flags |= ATA_FLAG_PMP;
@@ -2841,6 +2973,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			 "BIOS update required for suspend/resume\n");
 	}
 
+	if (ahci_broken_online(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
+		dev_info(&pdev->dev,
+			 "online status unreliable, applying workaround\n");
+	}
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
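
Note (not part of the patch): the three DEVICE_ATTR() entries added above hang off the SCSI host, so they would typically surface as read-only files such as /sys/class/scsi_host/hostN/ahci_port_cmd; the exact path and host number are assumptions, not stated in the patch. A minimal user-space sketch for reading one of them:

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* host0 is an assumption; substitute the AHCI host in question */
	FILE *f = fopen("/sys/class/scsi_host/host0/ahci_port_cmd", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("PORT_CMD = 0x%s", buf);	/* value is printed in hex */
	fclose(f);
	return 0;
}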
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 56b8a3ff1286..9ac4e378992e 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -664,6 +664,8 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
 	return ata_sff_prereset(link, deadline);
 }
 
+static DEFINE_SPINLOCK(piix_lock);
+
 /**
  *	piix_set_piomode - Initialize host controller PATA PIO timings
  *	@ap: Port whose timings we are configuring
@@ -677,8 +679,9 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
 
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 {
-	unsigned int pio = adev->pio_mode - XFER_PIO_0;
 	struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	unsigned int pio = adev->pio_mode - XFER_PIO_0;
 	unsigned int is_slave = (adev->devno != 0);
 	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
 	unsigned int slave_port = 0x44;
@@ -708,6 +711,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	if (adev->class == ATA_DEV_ATA)
 		control |= 4;	/* PPE enable */
 
+	spin_lock_irqsave(&piix_lock, flags);
+
 	/* PIO configuration clears DTE unconditionally.  It will be
 	 * programmed in set_dmamode which is guaranteed to be called
 	 * after set_piomode if any DMA mode is available.
@@ -747,6 +752,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 			udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
 		pci_write_config_byte(dev, 0x48, udma_enable);
 	}
+
+	spin_unlock_irqrestore(&piix_lock, flags);
 }
 
 /**
@@ -764,6 +771,7 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
 {
 	struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	unsigned long flags;
 	u8 master_port = ap->port_no ? 0x42 : 0x40;
 	u16 master_data;
 	u8 speed = adev->dma_mode;
@@ -777,6 +785,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
 			    { 2, 1 },
 			    { 2, 3 }, };
 
+	spin_lock_irqsave(&piix_lock, flags);
+
 	pci_read_config_word(dev, master_port, &master_data);
 	if (ap->udma_mask)
 		pci_read_config_byte(dev, 0x48, &udma_enable);
@@ -867,6 +877,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
 	/* Don't scribble on 0x48 if the controller does not support UDMA */
 	if (ap->udma_mask)
 		pci_write_config_byte(dev, 0x48, udma_enable);
+
+	spin_unlock_irqrestore(&piix_lock, flags);
 }
 
 /**
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ac176da1f94e..01964b6e6f6b 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -689,6 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
 	struct ata_taskfile tf, ptf, rtf;
 	unsigned int err_mask;
 	const char *level;
+	const char *descr;
 	char msg[60];
 	int rc;
 
@@ -736,11 +737,13 @@ static int ata_acpi_run_tf(struct ata_device *dev,
 		snprintf(msg, sizeof(msg), "filtered out");
 		rc = 0;
 	}
+	descr = ata_get_cmd_descript(tf.command);
 
 	ata_dev_printk(dev, level,
-		       "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x %s\n",
+		       "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
 		       tf.command, tf.feature, tf.nsect, tf.lbal,
-		       tf.lbam, tf.lbah, tf.device, msg);
+		       tf.lbam, tf.lbah, tf.device,
+		       (descr ? descr : "unknown"), msg);
 
 	return rc;
 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8ac98ff16d7d..df31deac5c82 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 		head = tf->device & 0xf;
 		sect = tf->lbal;
 
-		block = (cyl * dev->heads + head) * dev->sectors + sect;
+		if (!sect) {
+			ata_dev_printk(dev, KERN_WARNING, "device reported "
+				       "invalid CHS sector 0\n");
+			sect = 1; /* oh well */
+		}
+
+		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 	}
 
 	return block;
@@ -2299,29 +2305,49 @@ static inline u8 ata_dev_knobble(struct ata_device *dev)
 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
 }
 
-static void ata_dev_config_ncq(struct ata_device *dev,
-			       char *desc, size_t desc_sz)
+static int ata_dev_config_ncq(struct ata_device *dev,
+			      char *desc, size_t desc_sz)
 {
 	struct ata_port *ap = dev->link->ap;
 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+	unsigned int err_mask;
+	char *aa_desc = "";
 
 	if (!ata_id_has_ncq(dev->id)) {
 		desc[0] = '\0';
-		return;
+		return 0;
 	}
 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
 		snprintf(desc, desc_sz, "NCQ (not used)");
-		return;
+		return 0;
 	}
 	if (ap->flags & ATA_FLAG_NCQ) {
 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
 		dev->flags |= ATA_DFLAG_NCQ;
 	}
 
+	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+	    (ap->flags & ATA_FLAG_FPDMA_AA) &&
+	    ata_id_has_fpdma_aa(dev->id)) {
+		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
+					       SATA_FPDMA_AA);
+		if (err_mask) {
+			ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
+				       "(error_mask=0x%x)\n", err_mask);
+			if (err_mask != AC_ERR_DEV) {
+				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+				return -EIO;
+			}
+		} else
+			aa_desc = ", AA";
+	}
+
 	if (hdepth >= ddepth)
-		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
 	else
-		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
+		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
+			ddepth, aa_desc);
+	return 0;
 }
 
 /**
@@ -2461,7 +2487,7 @@ int ata_dev_configure(struct ata_device *dev)
 
 	if (ata_id_has_lba(id)) {
 		const char *lba_desc;
-		char ncq_desc[20];
+		char ncq_desc[24];
 
 		lba_desc = "LBA";
 		dev->flags |= ATA_DFLAG_LBA;
@@ -2475,7 +2501,9 @@ int ata_dev_configure(struct ata_device *dev)
 		}
 
 		/* config NCQ */
-		ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+		rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+		if (rc)
+			return rc;
 
 		/* print device info to dmesg */
 		if (ata_msg_drv(ap) && print_info) {
@@ -4302,6 +4330,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727",	ATA_HORKAGE_BROKEN_HPA },
 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
 
+	/* this one allows HPA unlocking but fails IOs on the area */
+	{ "OCZ-VERTEX",		"1.30",		ATA_HORKAGE_BROKEN_HPA },
+
 	/* Devices which report 1 sector over size HPA */
 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 79711b64054b..a04488f0de88 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <scsi/scsi_eh.h> | 40 | #include <scsi/scsi_eh.h> |
41 | #include <scsi/scsi_device.h> | 41 | #include <scsi/scsi_device.h> |
42 | #include <scsi/scsi_cmnd.h> | 42 | #include <scsi/scsi_cmnd.h> |
43 | #include <scsi/scsi_dbg.h> | ||
43 | #include "../scsi/scsi_transport_api.h" | 44 | #include "../scsi/scsi_transport_api.h" |
44 | 45 | ||
45 | #include <linux/libata.h> | 46 | #include <linux/libata.h> |
@@ -999,7 +1000,9 @@ static void __ata_port_freeze(struct ata_port *ap) | |||
999 | * ata_port_freeze - abort & freeze port | 1000 | * ata_port_freeze - abort & freeze port |
1000 | * @ap: ATA port to freeze | 1001 | * @ap: ATA port to freeze |
1001 | * | 1002 | * |
1002 | * Abort and freeze @ap. | 1003 | * Abort and freeze @ap. The freeze operation must be called |
1004 | * first, because some hardware requires special operations | ||
1005 | * before the taskfile registers are accessible. | ||
1003 | * | 1006 | * |
1004 | * LOCKING: | 1007 | * LOCKING: |
1005 | * spin_lock_irqsave(host lock) | 1008 | * spin_lock_irqsave(host lock) |
@@ -1013,8 +1016,8 @@ int ata_port_freeze(struct ata_port *ap) | |||
1013 | 1016 | ||
1014 | WARN_ON(!ap->ops->error_handler); | 1017 | WARN_ON(!ap->ops->error_handler); |
1015 | 1018 | ||
1016 | nr_aborted = ata_port_abort(ap); | ||
1017 | __ata_port_freeze(ap); | 1019 | __ata_port_freeze(ap); |
1020 | nr_aborted = ata_port_abort(ap); | ||
1018 | 1021 | ||
1019 | return nr_aborted; | 1022 | return nr_aborted; |
1020 | } | 1023 | } |
@@ -2110,6 +2113,116 @@ void ata_eh_autopsy(struct ata_port *ap) | |||
2110 | } | 2113 | } |
2111 | 2114 | ||
2112 | /** | 2115 | /** |
2116 | * ata_get_cmd_descript - get description for ATA command | ||
2117 | * @command: ATA command code to get description for | ||
2118 | * | ||
2119 | * Return a textual description of the given command, or NULL if the | ||
2120 | * command is not known. | ||
2121 | * | ||
2122 | * LOCKING: | ||
2123 | * None | ||
2124 | */ | ||
2125 | const char *ata_get_cmd_descript(u8 command) | ||
2126 | { | ||
2127 | #ifdef CONFIG_ATA_VERBOSE_ERROR | ||
2128 | static const struct | ||
2129 | { | ||
2130 | u8 command; | ||
2131 | const char *text; | ||
2132 | } cmd_descr[] = { | ||
2133 | { ATA_CMD_DEV_RESET, "DEVICE RESET" }, | ||
2134 | { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, | ||
2135 | { ATA_CMD_STANDBY, "STANDBY" }, | ||
2136 | { ATA_CMD_IDLE, "IDLE" }, | ||
2137 | { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, | ||
2138 | { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, | ||
2139 | { ATA_CMD_NOP, "NOP" }, | ||
2140 | { ATA_CMD_FLUSH, "FLUSH CACHE" }, | ||
2141 | { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, | ||
2142 | { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, | ||
2143 | { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, | ||
2144 | { ATA_CMD_SERVICE, "SERVICE" }, | ||
2145 | { ATA_CMD_READ, "READ DMA" }, | ||
2146 | { ATA_CMD_READ_EXT, "READ DMA EXT" }, | ||
2147 | { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, | ||
2148 | { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, | ||
2149 | { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, | ||
2150 | { ATA_CMD_WRITE, "WRITE DMA" }, | ||
2151 | { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, | ||
2152 | { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, | ||
2153 | { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, | ||
2154 | { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, | ||
2155 | { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, | ||
2156 | { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, | ||
2157 | { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, | ||
2158 | { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, | ||
2159 | { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, | ||
2160 | { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, | ||
2161 | { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, | ||
2162 | { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, | ||
2163 | { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, | ||
2164 | { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, | ||
2165 | { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, | ||
2166 | { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, | ||
2167 | { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, | ||
2168 | { ATA_CMD_SET_FEATURES, "SET FEATURES" }, | ||
2169 | { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, | ||
2170 | { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, | ||
2171 | { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, | ||
2172 | { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, | ||
2173 | { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, | ||
2174 | { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, | ||
2175 | { ATA_CMD_SLEEP, "SLEEP" }, | ||
2176 | { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, | ||
2177 | { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, | ||
2178 | { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, | ||
2179 | { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, | ||
2180 | { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, | ||
2181 | { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, | ||
2182 | { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, | ||
2183 | { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, | ||
2184 | { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, | ||
2185 | { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, | ||
2186 | { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, | ||
2187 | { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, | ||
2188 | { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, | ||
2189 | { ATA_CMD_PMP_READ, "READ BUFFER" }, | ||
2190 | { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, | ||
2191 | { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, | ||
2192 | { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, | ||
2193 | { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, | ||
2194 | { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, | ||
2195 | { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, | ||
2196 | { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, | ||
2197 | { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, | ||
2198 | { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, | ||
2199 | { ATA_CMD_SMART, "SMART" }, | ||
2200 | { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, | ||
2201 | { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, | ||
2202 | { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, | ||
2203 | { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, | ||
2204 | { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, | ||
2205 | { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, | ||
2206 | { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, | ||
2207 | { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, | ||
2208 | { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, | ||
2209 | { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, | ||
2210 | { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, | ||
2211 | { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, | ||
2212 | { ATA_CMD_RESTORE, "RECALIBRATE" }, | ||
2213 | { 0, NULL } /* terminate list */ | ||
2214 | }; | ||
2215 | |||
2216 | unsigned int i; | ||
2217 | for (i = 0; cmd_descr[i].text; i++) | ||
2218 | if (cmd_descr[i].command == command) | ||
2219 | return cmd_descr[i].text; | ||
2220 | #endif | ||
2221 | |||
2222 | return NULL; | ||
2223 | } | ||
2224 | |||
2225 | /** | ||
2113 | * ata_eh_link_report - report error handling to user | 2226 | * ata_eh_link_report - report error handling to user |
2114 | * @link: ATA link EH is going on | 2227 | * @link: ATA link EH is going on |
2115 | * | 2228 | * |
@@ -2175,6 +2288,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2175 | ata_link_printk(link, KERN_ERR, "%s\n", desc); | 2288 | ata_link_printk(link, KERN_ERR, "%s\n", desc); |
2176 | } | 2289 | } |
2177 | 2290 | ||
2291 | #ifdef CONFIG_ATA_VERBOSE_ERROR | ||
2178 | if (ehc->i.serror) | 2292 | if (ehc->i.serror) |
2179 | ata_link_printk(link, KERN_ERR, | 2293 | ata_link_printk(link, KERN_ERR, |
2180 | "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", | 2294 | "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", |
@@ -2195,6 +2309,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2195 | ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", | 2309 | ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", |
2196 | ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", | 2310 | ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", |
2197 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); | 2311 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); |
2312 | #endif | ||
2198 | 2313 | ||
2199 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 2314 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { |
2200 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | 2315 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); |
@@ -2226,14 +2341,23 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2226 | dma_str[qc->dma_dir]); | 2341 | dma_str[qc->dma_dir]); |
2227 | } | 2342 | } |
2228 | 2343 | ||
2229 | if (ata_is_atapi(qc->tf.protocol)) | 2344 | if (ata_is_atapi(qc->tf.protocol)) { |
2230 | snprintf(cdb_buf, sizeof(cdb_buf), | 2345 | if (qc->scsicmd) |
2346 | scsi_print_command(qc->scsicmd); | ||
2347 | else | ||
2348 | snprintf(cdb_buf, sizeof(cdb_buf), | ||
2231 | "cdb %02x %02x %02x %02x %02x %02x %02x %02x " | 2349 | "cdb %02x %02x %02x %02x %02x %02x %02x %02x " |
2232 | "%02x %02x %02x %02x %02x %02x %02x %02x\n ", | 2350 | "%02x %02x %02x %02x %02x %02x %02x %02x\n ", |
2233 | cdb[0], cdb[1], cdb[2], cdb[3], | 2351 | cdb[0], cdb[1], cdb[2], cdb[3], |
2234 | cdb[4], cdb[5], cdb[6], cdb[7], | 2352 | cdb[4], cdb[5], cdb[6], cdb[7], |
2235 | cdb[8], cdb[9], cdb[10], cdb[11], | 2353 | cdb[8], cdb[9], cdb[10], cdb[11], |
2236 | cdb[12], cdb[13], cdb[14], cdb[15]); | 2354 | cdb[12], cdb[13], cdb[14], cdb[15]); |
2355 | } else { | ||
2356 | const char *descr = ata_get_cmd_descript(cmd->command); | ||
2357 | if (descr) | ||
2358 | ata_dev_printk(qc->dev, KERN_ERR, | ||
2359 | "failed command: %s\n", descr); | ||
2360 | } | ||
2237 | 2361 | ||
2238 | ata_dev_printk(qc->dev, KERN_ERR, | 2362 | ata_dev_printk(qc->dev, KERN_ERR, |
2239 | "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " | 2363 | "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " |
@@ -2252,6 +2376,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2252 | res->device, qc->err_mask, ata_err_string(qc->err_mask), | 2376 | res->device, qc->err_mask, ata_err_string(qc->err_mask), |
2253 | qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); | 2377 | qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); |
2254 | 2378 | ||
2379 | #ifdef CONFIG_ATA_VERBOSE_ERROR | ||
2255 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | | 2380 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | |
2256 | ATA_ERR)) { | 2381 | ATA_ERR)) { |
2257 | if (res->command & ATA_BUSY) | 2382 | if (res->command & ATA_BUSY) |
@@ -2275,6 +2400,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2275 | res->feature & ATA_UNC ? "UNC " : "", | 2400 | res->feature & ATA_UNC ? "UNC " : "", |
2276 | res->feature & ATA_IDNF ? "IDNF " : "", | 2401 | res->feature & ATA_IDNF ? "IDNF " : "", |
2277 | res->feature & ATA_ABORTED ? "ABRT " : ""); | 2402 | res->feature & ATA_ABORTED ? "ABRT " : ""); |
2403 | #endif | ||
2278 | } | 2404 | } |
2279 | } | 2405 | } |
2280 | 2406 | ||
@@ -2574,11 +2700,17 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2574 | postreset(slave, classes); | 2700 | postreset(slave, classes); |
2575 | } | 2701 | } |
2576 | 2702 | ||
2577 | /* clear cached SError */ | 2703 | /* |
2704 | * Some controllers can't be frozen very well and may set | ||
2705 | * spurious error conditions during reset. Clear accumulated | ||
2706 | * error information. As reset is the final recovery action, | ||
2707 | * nothing is lost by doing this. | ||
2708 | */ | ||
2578 | spin_lock_irqsave(link->ap->lock, flags); | 2709 | spin_lock_irqsave(link->ap->lock, flags); |
2579 | link->eh_info.serror = 0; | 2710 | memset(&link->eh_info, 0, sizeof(link->eh_info)); |
2580 | if (slave) | 2711 | if (slave) |
2581 | slave->eh_info.serror = 0; | 2712 | memset(&slave->eh_info, 0, sizeof(slave->eh_info)); | ||
2713 | ap->pflags &= ~ATA_PFLAG_EH_PENDING; | ||
2582 | spin_unlock_irqrestore(link->ap->lock, flags); | 2714 | spin_unlock_irqrestore(link->ap->lock, flags); |
2583 | 2715 | ||
2584 | /* Make sure onlineness and classification result correspond. | 2716 | /* Make sure onlineness and classification result correspond. |
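Note: for illustration only, a self-contained sketch (toy structs, locking omitted; not kernel code) of the clearing step added above, which wipes the whole accumulated eh_info for the link and the optional slave link instead of only ->serror:

/* toy model of the post-reset EH-info clearing pattern */
#include <stdio.h>
#include <string.h>

struct toy_eh_info {
	unsigned int serror;
	unsigned int err_mask;
	unsigned int action;
};

struct toy_link {
	struct toy_eh_info eh_info;
};

static void toy_clear_eh_info(struct toy_link *link, struct toy_link *slave)
{
	/* previously only ->serror was zeroed; now the whole struct is */
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
}

int main(void)
{
	struct toy_link link = { { 0x00010000, 0x4, 0x2 } };

	toy_clear_eh_info(&link, NULL);
	printf("serror=%x err_mask=%x action=%x\n",
	       link.eh_info.serror, link.eh_info.err_mask, link.eh_info.action);
	return 0;
}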
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 619f2c33950e..51f0ffb78cbd 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c | |||
@@ -221,6 +221,8 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr) | |||
221 | { | 221 | { |
222 | u32 rev = gscr[SATA_PMP_GSCR_REV]; | 222 | u32 rev = gscr[SATA_PMP_GSCR_REV]; |
223 | 223 | ||
224 | if (rev & (1 << 3)) | ||
225 | return "1.2"; | ||
224 | if (rev & (1 << 2)) | 226 | if (rev & (1 << 2)) |
225 | return "1.1"; | 227 | return "1.1"; |
226 | if (rev & (1 << 1)) | 228 | if (rev & (1 << 1)) |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d0dfeef55db5..b4ee28dec521 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -1119,10 +1119,6 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, | |||
1119 | 1119 | ||
1120 | blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); | 1120 | blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); |
1121 | } else { | 1121 | } else { |
1122 | if (ata_id_is_ssd(dev->id)) | ||
1123 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, | ||
1124 | sdev->request_queue); | ||
1125 | |||
1126 | /* ATA devices must be sector aligned */ | 1122 | /* ATA devices must be sector aligned */ |
1127 | blk_queue_update_dma_alignment(sdev->request_queue, | 1123 | blk_queue_update_dma_alignment(sdev->request_queue, |
1128 | ATA_SECT_SIZE - 1); | 1124 | ATA_SECT_SIZE - 1); |
@@ -1257,23 +1253,6 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) | |||
1257 | return queue_depth; | 1253 | return queue_depth; |
1258 | } | 1254 | } |
1259 | 1255 | ||
1260 | /* XXX: for spindown warning */ | ||
1261 | static void ata_delayed_done_timerfn(unsigned long arg) | ||
1262 | { | ||
1263 | struct scsi_cmnd *scmd = (void *)arg; | ||
1264 | |||
1265 | scmd->scsi_done(scmd); | ||
1266 | } | ||
1267 | |||
1268 | /* XXX: for spindown warning */ | ||
1269 | static void ata_delayed_done(struct scsi_cmnd *scmd) | ||
1270 | { | ||
1271 | static struct timer_list timer; | ||
1272 | |||
1273 | setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd); | ||
1274 | mod_timer(&timer, jiffies + 5 * HZ); | ||
1275 | } | ||
1276 | |||
1277 | /** | 1256 | /** |
1278 | * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command | 1257 | * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command |
1279 | * @qc: Storage for translated ATA taskfile | 1258 | * @qc: Storage for translated ATA taskfile |
@@ -1338,32 +1317,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) | |||
1338 | system_entering_hibernation()) | 1317 | system_entering_hibernation()) |
1339 | goto skip; | 1318 | goto skip; |
1340 | 1319 | ||
1341 | /* XXX: This is for backward compatibility, will be | ||
1342 | * removed. Read Documentation/feature-removal-schedule.txt | ||
1343 | * for more info. | ||
1344 | */ | ||
1345 | if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && | ||
1346 | (system_state == SYSTEM_HALT || | ||
1347 | system_state == SYSTEM_POWER_OFF)) { | ||
1348 | static unsigned long warned; | ||
1349 | |||
1350 | if (!test_and_set_bit(0, &warned)) { | ||
1351 | ata_dev_printk(qc->dev, KERN_WARNING, | ||
1352 | "DISK MIGHT NOT BE SPUN DOWN PROPERLY. " | ||
1353 | "UPDATE SHUTDOWN UTILITY\n"); | ||
1354 | ata_dev_printk(qc->dev, KERN_WARNING, | ||
1355 | "For more info, visit " | ||
1356 | "http://linux-ata.org/shutdown.html\n"); | ||
1357 | |||
1358 | /* ->scsi_done is not used, use it for | ||
1359 | * delayed completion. | ||
1360 | */ | ||
1361 | scmd->scsi_done = qc->scsidone; | ||
1362 | qc->scsidone = ata_delayed_done; | ||
1363 | } | ||
1364 | goto skip; | ||
1365 | } | ||
1366 | |||
1367 | /* Issue ATA STANDBY IMMEDIATE command */ | 1320 | /* Issue ATA STANDBY IMMEDIATE command */ |
1368 | tf->command = ATA_CMD_STANDBYNOW1; | 1321 | tf->command = ATA_CMD_STANDBYNOW1; |
1369 | } | 1322 | } |
@@ -1764,14 +1717,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | |||
1764 | } | 1717 | } |
1765 | } | 1718 | } |
1766 | 1719 | ||
1767 | /* XXX: track spindown state for spindown skipping and warning */ | ||
1768 | if (unlikely(qc->tf.command == ATA_CMD_STANDBY || | ||
1769 | qc->tf.command == ATA_CMD_STANDBYNOW1)) | ||
1770 | qc->dev->flags |= ATA_DFLAG_SPUNDOWN; | ||
1771 | else if (likely(system_state != SYSTEM_HALT && | ||
1772 | system_state != SYSTEM_POWER_OFF)) | ||
1773 | qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN; | ||
1774 | |||
1775 | if (need_sense && !ap->ops->error_handler) | 1720 | if (need_sense && !ap->ops->error_handler) |
1776 | ata_dump_status(ap->print_id, &qc->result_tf); | 1721 | ata_dump_status(ap->print_id, &qc->result_tf); |
1777 | 1722 | ||
@@ -2815,28 +2760,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2815 | goto invalid_fld; | 2760 | goto invalid_fld; |
2816 | 2761 | ||
2817 | /* | 2762 | /* |
2818 | * Filter TPM commands by default. These provide an | ||
2819 | * essentially uncontrolled encrypted "back door" between | ||
2820 | * applications and the disk. Set libata.allow_tpm=1 if you | ||
2821 | * have a real reason for wanting to use them. This ensures | ||
2822 | * that installed software cannot easily mess stuff up without | ||
2823 | * user intent. DVR type users will probably ship with this enabled | ||
2824 | * for movie content management. | ||
2825 | * | ||
2826 | * Note that for ATA8 we can issue a DCS change and DCS freeze lock | ||
2827 | * for this and should do in future but that it is not sufficient as | ||
2828 | * DCS is an optional feature set. Thus we also do the software filter | ||
2829 | * so that we comply with the TC consortium stated goal that the user | ||
2830 | * can turn off TC features of their system. | ||
2831 | */ | ||
2832 | if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) | ||
2833 | goto invalid_fld; | ||
2834 | |||
2835 | /* We may not issue DMA commands if no DMA mode is set */ | ||
2836 | if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) | ||
2837 | goto invalid_fld; | ||
2838 | |||
2839 | /* | ||
2840 | * 12 and 16 byte CDBs use different offsets to | 2763 | * 12 and 16 byte CDBs use different offsets to |
2841 | * provide the various register values. | 2764 | * provide the various register values. |
2842 | */ | 2765 | */ |
@@ -2885,6 +2808,41 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2885 | tf->device = dev->devno ? | 2808 | tf->device = dev->devno ? |
2886 | tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; | 2809 | tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; |
2887 | 2810 | ||
2811 | /* READ/WRITE LONG use a non-standard sect_size */ | ||
2812 | qc->sect_size = ATA_SECT_SIZE; | ||
2813 | switch (tf->command) { | ||
2814 | case ATA_CMD_READ_LONG: | ||
2815 | case ATA_CMD_READ_LONG_ONCE: | ||
2816 | case ATA_CMD_WRITE_LONG: | ||
2817 | case ATA_CMD_WRITE_LONG_ONCE: | ||
2818 | if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) | ||
2819 | goto invalid_fld; | ||
2820 | qc->sect_size = scsi_bufflen(scmd); | ||
2821 | } | ||
2822 | |||
2823 | /* | ||
2824 | * Set flags so that all registers will be written, pass on | ||
2825 | * write indication (used for PIO/DMA setup), result TF is | ||
2826 | * copied back and we don't whine too much about its failure. | ||
2827 | */ | ||
2828 | tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
2829 | if (scmd->sc_data_direction == DMA_TO_DEVICE) | ||
2830 | tf->flags |= ATA_TFLAG_WRITE; | ||
2831 | |||
2832 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | ||
2833 | |||
2834 | /* | ||
2835 | * Set transfer length. | ||
2836 | * | ||
2837 | * TODO: find out if we need to do more here to | ||
2838 | * cover scatter/gather case. | ||
2839 | */ | ||
2840 | ata_qc_set_pc_nbytes(qc); | ||
2841 | |||
2842 | /* We may not issue DMA commands if no DMA mode is set */ | ||
2843 | if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) | ||
2844 | goto invalid_fld; | ||
2845 | |||
2888 | /* sanity check for pio multi commands */ | 2846 | /* sanity check for pio multi commands */ |
2889 | if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) | 2847 | if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) |
2890 | goto invalid_fld; | 2848 | goto invalid_fld; |
@@ -2901,18 +2859,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2901 | multi_count); | 2859 | multi_count); |
2902 | } | 2860 | } |
2903 | 2861 | ||
2904 | /* READ/WRITE LONG use a non-standard sect_size */ | ||
2905 | qc->sect_size = ATA_SECT_SIZE; | ||
2906 | switch (tf->command) { | ||
2907 | case ATA_CMD_READ_LONG: | ||
2908 | case ATA_CMD_READ_LONG_ONCE: | ||
2909 | case ATA_CMD_WRITE_LONG: | ||
2910 | case ATA_CMD_WRITE_LONG_ONCE: | ||
2911 | if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) | ||
2912 | goto invalid_fld; | ||
2913 | qc->sect_size = scsi_bufflen(scmd); | ||
2914 | } | ||
2915 | |||
2916 | /* | 2862 | /* |
2917 | * Filter SET_FEATURES - XFER MODE command -- otherwise, | 2863 | * Filter SET_FEATURES - XFER MODE command -- otherwise, |
2918 | * SET_FEATURES - XFER MODE must be preceded/succeeded | 2864 | * SET_FEATURES - XFER MODE must be preceded/succeeded |
@@ -2920,30 +2866,27 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2920 | * controller (i.e. the reason for ->set_piomode(), | 2866 | * controller (i.e. the reason for ->set_piomode(), |
2921 | * ->set_dmamode(), and ->post_set_mode() hooks). | 2867 | * ->set_dmamode(), and ->post_set_mode() hooks). |
2922 | */ | 2868 | */ |
2923 | if ((tf->command == ATA_CMD_SET_FEATURES) | 2869 | if (tf->command == ATA_CMD_SET_FEATURES && |
2924 | && (tf->feature == SETFEATURES_XFER)) | 2870 | tf->feature == SETFEATURES_XFER) |
2925 | goto invalid_fld; | 2871 | goto invalid_fld; |
2926 | 2872 | ||
2927 | /* | 2873 | /* |
2928 | * Set flags so that all registers will be written, | 2874 | * Filter TPM commands by default. These provide an |
2929 | * and pass on write indication (used for PIO/DMA | 2875 | * essentially uncontrolled encrypted "back door" between |
2930 | * setup.) | 2876 | * applications and the disk. Set libata.allow_tpm=1 if you |
2931 | */ | 2877 | * have a real reason for wanting to use them. This ensures |
2932 | tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); | 2878 | * that installed software cannot easily mess stuff up without |
2933 | 2879 | * user intent. DVR type users will probably ship with this enabled | |
2934 | if (scmd->sc_data_direction == DMA_TO_DEVICE) | 2880 | * for movie content management. |
2935 | tf->flags |= ATA_TFLAG_WRITE; | ||
2936 | |||
2937 | /* | ||
2938 | * Set transfer length. | ||
2939 | * | 2881 | * |
2940 | * TODO: find out if we need to do more here to | 2882 | * Note that for ATA8 we can issue a DCS change and DCS freeze lock |
2941 | * cover scatter/gather case. | 2883 | * for this and should do in future but that it is not sufficient as |
2884 | * DCS is an optional feature set. Thus we also do the software filter | ||
2885 | * so that we comply with the TC consortium stated goal that the user | ||
2886 | * can turn off TC features of their system. | ||
2942 | */ | 2887 | */ |
2943 | ata_qc_set_pc_nbytes(qc); | 2888 | if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) |
2944 | 2889 | goto invalid_fld; | |
2945 | /* request result TF and be quiet about device error */ | ||
2946 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | ||
2947 | 2890 | ||
2948 | return 0; | 2891 | return 0; |
2949 | 2892 | ||
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 89a1e0018e71..be8e2628f82c 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -164,6 +164,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, | |||
164 | extern void ata_eh_done(struct ata_link *link, struct ata_device *dev, | 164 | extern void ata_eh_done(struct ata_link *link, struct ata_device *dev, |
165 | unsigned int action); | 165 | unsigned int action); |
166 | extern void ata_eh_autopsy(struct ata_port *ap); | 166 | extern void ata_eh_autopsy(struct ata_port *ap); |
167 | const char *ata_get_cmd_descript(u8 command); | ||
167 | extern void ata_eh_report(struct ata_port *ap); | 168 | extern void ata_eh_report(struct ata_port *ap); |
168 | extern int ata_eh_reset(struct ata_link *link, int classify, | 169 | extern int ata_eh_reset(struct ata_link *link, int classify, |
169 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | 170 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, |
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 5702affcb325..41c94b1ae493 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c | |||
@@ -250,7 +250,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
250 | ata_port_desc(ap, "no IRQ, using PIO polling"); | 250 | ata_port_desc(ap, "no IRQ, using PIO polling"); |
251 | } | 251 | } |
252 | 252 | ||
253 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 253 | info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); |
254 | 254 | ||
255 | if (!info) { | 255 | if (!info) { |
256 | dev_err(dev, "failed to allocate memory for private data\n"); | 256 | dev_err(dev, "failed to allocate memory for private data\n"); |
@@ -275,7 +275,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
275 | if (!info->ide_addr) { | 275 | if (!info->ide_addr) { |
276 | dev_err(dev, "failed to map IO base\n"); | 276 | dev_err(dev, "failed to map IO base\n"); |
277 | ret = -ENOMEM; | 277 | ret = -ENOMEM; |
278 | goto err_ide_ioremap; | 278 | goto err_put; |
279 | } | 279 | } |
280 | 280 | ||
281 | info->alt_addr = devm_ioremap(dev, | 281 | info->alt_addr = devm_ioremap(dev, |
@@ -284,7 +284,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
284 | if (!info->alt_addr) { | 284 | if (!info->alt_addr) { |
285 | dev_err(dev, "failed to map CTL base\n"); | 285 | dev_err(dev, "failed to map CTL base\n"); |
286 | ret = -ENOMEM; | 286 | ret = -ENOMEM; |
287 | goto err_alt_ioremap; | 287 | goto err_put; |
288 | } | 288 | } |
289 | 289 | ||
290 | ap->ioaddr.cmd_addr = info->ide_addr; | 290 | ap->ioaddr.cmd_addr = info->ide_addr; |
@@ -303,13 +303,8 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
303 | irq ? ata_sff_interrupt : NULL, | 303 | irq ? ata_sff_interrupt : NULL, |
304 | irq_flags, &pata_at91_sht); | 304 | irq_flags, &pata_at91_sht); |
305 | 305 | ||
306 | err_alt_ioremap: | 306 | err_put: |
307 | devm_iounmap(dev, info->ide_addr); | ||
308 | |||
309 | err_ide_ioremap: | ||
310 | clk_put(info->mck); | 307 | clk_put(info->mck); |
311 | kfree(info); | ||
312 | |||
313 | return ret; | 308 | return ret; |
314 | } | 309 | } |
315 | 310 | ||
@@ -317,7 +312,6 @@ static int __devexit pata_at91_remove(struct platform_device *pdev) | |||
317 | { | 312 | { |
318 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | 313 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
319 | struct at91_ide_info *info; | 314 | struct at91_ide_info *info; |
320 | struct device *dev = &pdev->dev; | ||
321 | 315 | ||
322 | if (!host) | 316 | if (!host) |
323 | return 0; | 317 | return 0; |
@@ -328,11 +322,8 @@ static int __devexit pata_at91_remove(struct platform_device *pdev) | |||
328 | if (!info) | 322 | if (!info) |
329 | return 0; | 323 | return 0; |
330 | 324 | ||
331 | devm_iounmap(dev, info->ide_addr); | ||
332 | devm_iounmap(dev, info->alt_addr); | ||
333 | clk_put(info->mck); | 325 | clk_put(info->mck); |
334 | 326 | ||
335 | kfree(info); | ||
336 | return 0; | 327 | return 0; |
337 | } | 328 | } |
338 | 329 | ||
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index bec0b8ade66d..aa4b3f6ae771 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * pata_atiixp.c - ATI PATA for new ATA layer | 2 | * pata_atiixp.c - ATI PATA for new ATA layer |
3 | * (C) 2005 Red Hat Inc | 3 | * (C) 2005 Red Hat Inc |
4 | * (C) 2009 Bartlomiej Zolnierkiewicz | ||
4 | * | 5 | * |
5 | * Based on | 6 | * Based on |
6 | * | 7 | * |
@@ -61,20 +62,19 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, | |||
61 | 62 | ||
62 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 63 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
63 | int dn = 2 * ap->port_no + adev->devno; | 64 | int dn = 2 * ap->port_no + adev->devno; |
64 | |||
65 | /* Check this is correct - the order is odd in both drivers */ | ||
66 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); | 65 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); |
67 | u16 pio_mode_data, pio_timing_data; | 66 | u32 pio_timing_data; |
67 | u16 pio_mode_data; | ||
68 | 68 | ||
69 | pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); | 69 | pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); |
70 | pio_mode_data &= ~(0x7 << (4 * dn)); | 70 | pio_mode_data &= ~(0x7 << (4 * dn)); |
71 | pio_mode_data |= pio << (4 * dn); | 71 | pio_mode_data |= pio << (4 * dn); |
72 | pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data); | 72 | pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data); |
73 | 73 | ||
74 | pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); | 74 | pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); |
75 | pio_timing_data &= ~(0xFF << timing_shift); | 75 | pio_timing_data &= ~(0xFF << timing_shift); |
76 | pio_timing_data |= (pio_timings[pio] << timing_shift); | 76 | pio_timing_data |= (pio_timings[pio] << timing_shift); |
77 | pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); | 77 | pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); |
78 | } | 78 | } |
79 | 79 | ||
80 | /** | 80 | /** |
@@ -119,16 +119,17 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
119 | udma_mode_data |= dma << (4 * dn); | 119 | udma_mode_data |= dma << (4 * dn); |
120 | pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data); | 120 | pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data); |
121 | } else { | 121 | } else { |
122 | u16 mwdma_timing_data; | ||
123 | /* Check this is correct - the order is odd in both drivers */ | ||
124 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); | 122 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); |
123 | u32 mwdma_timing_data; | ||
125 | 124 | ||
126 | dma -= XFER_MW_DMA_0; | 125 | dma -= XFER_MW_DMA_0; |
127 | 126 | ||
128 | pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data); | 127 | pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, |
128 | &mwdma_timing_data); | ||
129 | mwdma_timing_data &= ~(0xFF << timing_shift); | 129 | mwdma_timing_data &= ~(0xFF << timing_shift); |
130 | mwdma_timing_data |= (mwdma_timings[dma] << timing_shift); | 130 | mwdma_timing_data |= (mwdma_timings[dma] << timing_shift); |
131 | pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data); | 131 | pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, |
132 | mwdma_timing_data); | ||
132 | } | 133 | } |
133 | /* | 134 | /* |
134 | * We must now look at the PIO mode situation. We may need to | 135 | * We must now look at the PIO mode situation. We may need to |
@@ -245,6 +246,7 @@ static const struct pci_device_id atiixp[] = { | |||
245 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, | 246 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, |
246 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, | 247 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, |
247 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, | 248 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, |
249 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), }, | ||
248 | 250 | ||
249 | { }, | 251 | { }, |
250 | }; | 252 | }; |
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index d33aa28239a9..403f56165cec 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c | |||
@@ -202,7 +202,8 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
202 | } | 202 | } |
203 | 203 | ||
204 | static const struct pci_device_id cs5535[] = { | 204 | static const struct pci_device_id cs5535[] = { |
205 | { PCI_VDEVICE(NS, 0x002D), }, | 205 | { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), }, |
206 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), }, | ||
206 | 207 | ||
207 | { }, | 208 | { }, |
208 | }; | 209 | }; |
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index abdd19fe990a..d6f69561dc86 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c | |||
@@ -213,7 +213,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev) | |||
213 | * This is tI, C.F. spec. says 0, but Sony CF card requires | 213 | * This is tI, C.F. spec. says 0, but Sony CF card requires |
214 | * more, we use 20 nS. | 214 | * more, we use 20 nS. |
215 | */ | 215 | */ |
216 | dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);; | 216 | dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20); |
217 | dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh); | 217 | dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh); |
218 | 218 | ||
219 | dma_tim.s.dmarq = dma_arq; | 219 | dma_tim.s.dmarq = dma_arq; |
@@ -841,7 +841,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev) | |||
841 | ocd = pdev->dev.platform_data; | 841 | ocd = pdev->dev.platform_data; |
842 | 842 | ||
843 | cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, | 843 | cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, |
844 | res_cs0->end - res_cs0->start + 1); | 844 | resource_size(res_cs0)); |
845 | 845 | ||
846 | if (!cs0) | 846 | if (!cs0) |
847 | return -ENOMEM; | 847 | return -ENOMEM; |
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index d8d743af3225..3f6ebc6c665a 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c | |||
@@ -151,14 +151,14 @@ int __devinit __pata_platform_probe(struct device *dev, | |||
151 | */ | 151 | */ |
152 | if (mmio) { | 152 | if (mmio) { |
153 | ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start, | 153 | ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start, |
154 | io_res->end - io_res->start + 1); | 154 | resource_size(io_res)); |
155 | ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start, | 155 | ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start, |
156 | ctl_res->end - ctl_res->start + 1); | 156 | resource_size(ctl_res)); |
157 | } else { | 157 | } else { |
158 | ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start, | 158 | ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start, |
159 | io_res->end - io_res->start + 1); | 159 | resource_size(io_res)); |
160 | ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start, | 160 | ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start, |
161 | ctl_res->end - ctl_res->start + 1); | 161 | resource_size(ctl_res)); |
162 | } | 162 | } |
163 | if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) { | 163 | if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) { |
164 | dev_err(dev, "failed to map IO/CTL base\n"); | 164 | dev_err(dev, "failed to map IO/CTL base\n"); |
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index 8e3cdef8a25f..45f1e10f917b 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c | |||
@@ -151,7 +151,7 @@ static __devinit int rb532_pata_driver_probe(struct platform_device *pdev) | |||
151 | info->irq = irq; | 151 | info->irq = irq; |
152 | 152 | ||
153 | info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, | 153 | info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, |
154 | res->end - res->start + 1); | 154 | resource_size(res)); |
155 | if (!info->iobase) | 155 | if (!info->iobase) |
156 | return -ENOMEM; | 156 | return -ENOMEM; |
157 | 157 | ||
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c new file mode 100644 index 000000000000..c843a1e07c4f --- /dev/null +++ b/drivers/ata/pata_rdc.c | |||
@@ -0,0 +1,400 @@ | |||
1 | /* | ||
2 | * pata_rdc - Driver for later RDC PATA controllers | ||
3 | * | ||
4 | * This is actually a driver for hardware meeting | ||
5 | * INCITS 370-2004 (1510D): ATA Host Adapter Standards | ||
6 | * | ||
7 | * Based on ata_piix. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; see the file COPYING. If not, write to | ||
21 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/blkdev.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <scsi/scsi_host.h> | ||
32 | #include <linux/libata.h> | ||
33 | #include <linux/dmi.h> | ||
34 | |||
35 | #define DRV_NAME "pata_rdc" | ||
36 | #define DRV_VERSION "0.01" | ||
37 | |||
38 | struct rdc_host_priv { | ||
39 | u32 saved_iocfg; | ||
40 | }; | ||
41 | |||
42 | /** | ||
43 | * rdc_pata_cable_detect - Probe host controller cable detect info | ||
44 | * @ap: Port for which cable detect info is desired | ||
45 | * | ||
46 | * Read 80c cable indicator from ATA PCI device's PCI config | ||
47 | * register. This register is normally set by firmware (BIOS). | ||
48 | * | ||
49 | * LOCKING: | ||
50 | * None (inherited from caller). | ||
51 | */ | ||
52 | |||
53 | static int rdc_pata_cable_detect(struct ata_port *ap) | ||
54 | { | ||
55 | struct rdc_host_priv *hpriv = ap->host->private_data; | ||
56 | u8 mask; | ||
57 | |||
58 | /* check BIOS cable detect results */ | ||
59 | mask = 0x30 << (2 * ap->port_no); | ||
60 | if ((hpriv->saved_iocfg & mask) == 0) | ||
61 | return ATA_CBL_PATA40; | ||
62 | return ATA_CBL_PATA80; | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * rdc_pata_prereset - prereset for PATA host controller | ||
67 | * @link: Target link | ||
68 | * @deadline: deadline jiffies for the operation | ||
69 | * | ||
70 | * LOCKING: | ||
71 | * None (inherited from caller). | ||
72 | */ | ||
73 | static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline) | ||
74 | { | ||
75 | struct ata_port *ap = link->ap; | ||
76 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
77 | |||
78 | static const struct pci_bits rdc_enable_bits[] = { | ||
79 | { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ | ||
80 | { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ | ||
81 | }; | ||
82 | |||
83 | if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no])) | ||
84 | return -ENOENT; | ||
85 | return ata_sff_prereset(link, deadline); | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * rdc_set_piomode - Initialize host controller PATA PIO timings | ||
90 | * @ap: Port whose timings we are configuring | ||
91 | * @adev: Device whose timings we are configuring | ||
92 | * | ||
93 | * Set PIO mode for device, in host controller PCI config space. | ||
94 | * | ||
95 | * LOCKING: | ||
96 | * None (inherited from caller). | ||
97 | */ | ||
98 | |||
99 | static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
100 | { | ||
101 | unsigned int pio = adev->pio_mode - XFER_PIO_0; | ||
102 | struct pci_dev *dev = to_pci_dev(ap->host->dev); | ||
103 | unsigned int is_slave = (adev->devno != 0); | ||
104 | unsigned int master_port= ap->port_no ? 0x42 : 0x40; | ||
105 | unsigned int slave_port = 0x44; | ||
106 | u16 master_data; | ||
107 | u8 slave_data; | ||
108 | u8 udma_enable; | ||
109 | int control = 0; | ||
110 | |||
111 | static const /* ISP RTC */ | ||
112 | u8 timings[][2] = { { 0, 0 }, | ||
113 | { 0, 0 }, | ||
114 | { 1, 0 }, | ||
115 | { 2, 1 }, | ||
116 | { 2, 3 }, }; | ||
117 | |||
118 | if (pio >= 2) | ||
119 | control |= 1; /* TIME1 enable */ | ||
120 | if (ata_pio_need_iordy(adev)) | ||
121 | control |= 2; /* IE enable */ | ||
122 | |||
123 | if (adev->class == ATA_DEV_ATA) | ||
124 | control |= 4; /* PPE enable */ | ||
125 | |||
126 | /* PIO configuration clears DTE unconditionally. It will be | ||
127 | * programmed in set_dmamode which is guaranteed to be called | ||
128 | * after set_piomode if any DMA mode is available. | ||
129 | */ | ||
130 | pci_read_config_word(dev, master_port, &master_data); | ||
131 | if (is_slave) { | ||
132 | /* clear TIME1|IE1|PPE1|DTE1 */ | ||
133 | master_data &= 0xff0f; | ||
134 | /* Enable SITRE (separate slave timing register) */ | ||
135 | master_data |= 0x4000; | ||
136 | /* enable PPE1, IE1 and TIME1 as needed */ | ||
137 | master_data |= (control << 4); | ||
138 | pci_read_config_byte(dev, slave_port, &slave_data); | ||
139 | slave_data &= (ap->port_no ? 0x0f : 0xf0); | ||
140 | /* Load the timing nibble for this slave */ | ||
141 | slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) | ||
142 | << (ap->port_no ? 4 : 0); | ||
143 | } else { | ||
144 | /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */ | ||
145 | master_data &= 0xccf0; | ||
146 | /* Enable PPE, IE and TIME as appropriate */ | ||
147 | master_data |= control; | ||
148 | /* load ISP and RCT */ | ||
149 | master_data |= | ||
150 | (timings[pio][0] << 12) | | ||
151 | (timings[pio][1] << 8); | ||
152 | } | ||
153 | pci_write_config_word(dev, master_port, master_data); | ||
154 | if (is_slave) | ||
155 | pci_write_config_byte(dev, slave_port, slave_data); | ||
156 | |||
157 | /* Ensure the UDMA bit is off - it will be turned back on if | ||
158 | UDMA is selected */ | ||
159 | |||
160 | pci_read_config_byte(dev, 0x48, &udma_enable); | ||
161 | udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); | ||
162 | pci_write_config_byte(dev, 0x48, udma_enable); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * rdc_set_dmamode - Initialize host controller PATA PIO timings | ||
167 | * @ap: Port whose timings we are configuring | ||
168 | * @adev: Drive in question | ||
169 | * | ||
170 | * Set UDMA mode for device, in host controller PCI config space. | ||
171 | * | ||
172 | * LOCKING: | ||
173 | * None (inherited from caller). | ||
174 | */ | ||
175 | |||
176 | static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
177 | { | ||
178 | struct pci_dev *dev = to_pci_dev(ap->host->dev); | ||
179 | u8 master_port = ap->port_no ? 0x42 : 0x40; | ||
180 | u16 master_data; | ||
181 | u8 speed = adev->dma_mode; | ||
182 | int devid = adev->devno + 2 * ap->port_no; | ||
183 | u8 udma_enable = 0; | ||
184 | |||
185 | static const /* ISP RTC */ | ||
186 | u8 timings[][2] = { { 0, 0 }, | ||
187 | { 0, 0 }, | ||
188 | { 1, 0 }, | ||
189 | { 2, 1 }, | ||
190 | { 2, 3 }, }; | ||
191 | |||
192 | pci_read_config_word(dev, master_port, &master_data); | ||
193 | pci_read_config_byte(dev, 0x48, &udma_enable); | ||
194 | |||
195 | if (speed >= XFER_UDMA_0) { | ||
196 | unsigned int udma = adev->dma_mode - XFER_UDMA_0; | ||
197 | u16 udma_timing; | ||
198 | u16 ideconf; | ||
199 | int u_clock, u_speed; | ||
200 | |||
201 | /* | ||
202 | * UDMA is handled by a combination of clock switching and | ||
203 | * selection of dividers | ||
204 | * | ||
205 | * Handy rule: Odd modes are UDMATIMx 01, even are 02 | ||
206 | * except UDMA0 which is 00 | ||
207 | */ | ||
208 | u_speed = min(2 - (udma & 1), udma); | ||
209 | if (udma == 5) | ||
210 | u_clock = 0x1000; /* 100MHz */ | ||
211 | else if (udma > 2) | ||
212 | u_clock = 1; /* 66MHz */ | ||
213 | else | ||
214 | u_clock = 0; /* 33MHz */ | ||
215 | |||
216 | udma_enable |= (1 << devid); | ||
217 | |||
218 | /* Load the CT/RP selection */ | ||
219 | pci_read_config_word(dev, 0x4A, &udma_timing); | ||
220 | udma_timing &= ~(3 << (4 * devid)); | ||
221 | udma_timing |= u_speed << (4 * devid); | ||
222 | pci_write_config_word(dev, 0x4A, udma_timing); | ||
223 | |||
224 | /* Select a 33/66/100MHz clock */ | ||
225 | pci_read_config_word(dev, 0x54, &ideconf); | ||
226 | ideconf &= ~(0x1001 << devid); | ||
227 | ideconf |= u_clock << devid; | ||
228 | pci_write_config_word(dev, 0x54, ideconf); | ||
229 | } else { | ||
230 | /* | ||
231 | * MWDMA is driven by the PIO timings. We must also enable | ||
232 | * IORDY unconditionally along with TIME1. PPE has already | ||
233 | * been set when the PIO timing was set. | ||
234 | */ | ||
235 | unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; | ||
236 | unsigned int control; | ||
237 | u8 slave_data; | ||
238 | const unsigned int needed_pio[3] = { | ||
239 | XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 | ||
240 | }; | ||
241 | int pio = needed_pio[mwdma] - XFER_PIO_0; | ||
242 | |||
243 | control = 3; /* IORDY|TIME1 */ | ||
244 | |||
245 | /* If the drive MWDMA is faster than it can do PIO then | ||
246 | we must force PIO into PIO0 */ | ||
247 | |||
248 | if (adev->pio_mode < needed_pio[mwdma]) | ||
249 | /* Enable DMA timing only */ | ||
250 | control |= 8; /* PIO cycles in PIO0 */ | ||
251 | |||
252 | if (adev->devno) { /* Slave */ | ||
253 | master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */ | ||
254 | master_data |= control << 4; | ||
255 | pci_read_config_byte(dev, 0x44, &slave_data); | ||
256 | slave_data &= (ap->port_no ? 0x0f : 0xf0); | ||
257 | /* Load the matching timing */ | ||
258 | slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); | ||
259 | pci_write_config_byte(dev, 0x44, slave_data); | ||
260 | } else { /* Master */ | ||
261 | master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY | ||
262 | and master timing bits */ | ||
263 | master_data |= control; | ||
264 | master_data |= | ||
265 | (timings[pio][0] << 12) | | ||
266 | (timings[pio][1] << 8); | ||
267 | } | ||
268 | |||
269 | udma_enable &= ~(1 << devid); | ||
270 | pci_write_config_word(dev, master_port, master_data); | ||
271 | } | ||
272 | pci_write_config_byte(dev, 0x48, udma_enable); | ||
273 | } | ||
274 | |||
275 | static struct ata_port_operations rdc_pata_ops = { | ||
276 | .inherits = &ata_bmdma32_port_ops, | ||
277 | .cable_detect = rdc_pata_cable_detect, | ||
278 | .set_piomode = rdc_set_piomode, | ||
279 | .set_dmamode = rdc_set_dmamode, | ||
280 | .prereset = rdc_pata_prereset, | ||
281 | }; | ||
282 | |||
283 | static struct ata_port_info rdc_port_info = { | ||
284 | |||
285 | .flags = ATA_FLAG_SLAVE_POSS, | ||
286 | .pio_mask = ATA_PIO4, | ||
287 | .mwdma_mask = ATA_MWDMA2, | ||
288 | .udma_mask = ATA_UDMA5, | ||
289 | .port_ops = &rdc_pata_ops, | ||
290 | }; | ||
291 | |||
292 | static struct scsi_host_template rdc_sht = { | ||
293 | ATA_BMDMA_SHT(DRV_NAME), | ||
294 | }; | ||
295 | |||
296 | /** | ||
297 | * rdc_init_one - Register PIIX ATA PCI device with kernel services | ||
298 | * @pdev: PCI device to register | ||
299 | * @ent: Entry in rdc_pci_tbl matching with @pdev | ||
300 | * | ||
301 | * Called from kernel PCI layer. We probe for combined mode (sigh), | ||
302 | * and then hand over control to libata, for it to do the rest. | ||
303 | * | ||
304 | * LOCKING: | ||
305 | * Inherited from PCI layer (may sleep). | ||
306 | * | ||
307 | * RETURNS: | ||
308 | * Zero on success, or -ERRNO value. | ||
309 | */ | ||
310 | |||
311 | static int __devinit rdc_init_one(struct pci_dev *pdev, | ||
312 | const struct pci_device_id *ent) | ||
313 | { | ||
314 | static int printed_version; | ||
315 | struct device *dev = &pdev->dev; | ||
316 | struct ata_port_info port_info[2]; | ||
317 | const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] }; | ||
318 | unsigned long port_flags; | ||
319 | struct ata_host *host; | ||
320 | struct rdc_host_priv *hpriv; | ||
321 | int rc; | ||
322 | |||
323 | if (!printed_version++) | ||
324 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
325 | "version " DRV_VERSION "\n"); | ||
326 | |||
327 | port_info[0] = rdc_port_info; | ||
328 | port_info[1] = rdc_port_info; | ||
329 | |||
330 | port_flags = port_info[0].flags; | ||
331 | |||
332 | /* enable device and prepare host */ | ||
333 | rc = pcim_enable_device(pdev); | ||
334 | if (rc) | ||
335 | return rc; | ||
336 | |||
337 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); | ||
338 | if (!hpriv) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | /* Save IOCFG; this will be used for cable detection, quirk | ||
342 | * detection and restoration on detach. | ||
343 | */ | ||
344 | pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); | ||
345 | |||
346 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | ||
347 | if (rc) | ||
348 | return rc; | ||
349 | host->private_data = hpriv; | ||
350 | |||
351 | pci_intx(pdev, 1); | ||
352 | |||
353 | host->flags |= ATA_HOST_PARALLEL_SCAN; | ||
354 | |||
355 | pci_set_master(pdev); | ||
356 | return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); | ||
357 | } | ||
358 | |||
359 | static void rdc_remove_one(struct pci_dev *pdev) | ||
360 | { | ||
361 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
362 | struct rdc_host_priv *hpriv = host->private_data; | ||
363 | |||
364 | pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg); | ||
365 | |||
366 | ata_pci_remove_one(pdev); | ||
367 | } | ||
368 | |||
369 | static const struct pci_device_id rdc_pci_tbl[] = { | ||
370 | { PCI_DEVICE(0x17F3, 0x1011), }, | ||
371 | { PCI_DEVICE(0x17F3, 0x1012), }, | ||
372 | { } /* terminate list */ | ||
373 | }; | ||
374 | |||
375 | static struct pci_driver rdc_pci_driver = { | ||
376 | .name = DRV_NAME, | ||
377 | .id_table = rdc_pci_tbl, | ||
378 | .probe = rdc_init_one, | ||
379 | .remove = rdc_remove_one, | ||
380 | }; | ||
381 | |||
382 | |||
383 | static int __init rdc_init(void) | ||
384 | { | ||
385 | return pci_register_driver(&rdc_pci_driver); | ||
386 | } | ||
387 | |||
388 | static void __exit rdc_exit(void) | ||
389 | { | ||
390 | pci_unregister_driver(&rdc_pci_driver); | ||
391 | } | ||
392 | |||
393 | module_init(rdc_init); | ||
394 | module_exit(rdc_exit); | ||
395 | |||
396 | MODULE_AUTHOR("Alan Cox (based on ata_piix)"); | ||
397 | MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers"); | ||
398 | MODULE_LICENSE("GPL"); | ||
399 | MODULE_DEVICE_TABLE(pci, rdc_pci_tbl); | ||
400 | MODULE_VERSION(DRV_VERSION); | ||
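Note: a quick standalone check (illustration only, not kernel code) of the "handy rule" stated in rdc_set_dmamode() above — odd UDMA modes select UDMATIMx value 01, even modes 02, except UDMA0 which selects 00:

/* verify u_speed = min(2 - (udma & 1), udma) against the stated rule */
#include <stdio.h>

static int udmatim_value(int udma)
{
	int even_or_odd = 2 - (udma & 1);	/* 2 for even modes, 1 for odd */
	return even_or_odd < udma ? even_or_odd : udma;	/* min() clamps UDMA0 to 0 */
}

int main(void)
{
	int udma;

	for (udma = 0; udma <= 5; udma++)
		printf("UDMA%d -> UDMATIMx %02d\n", udma, udmatim_value(udma));
	return 0;
}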
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c index 0c574c065c62..a5e4dfe60b41 100644 --- a/drivers/ata/pata_rz1000.c +++ b/drivers/ata/pata_rz1000.c | |||
@@ -85,7 +85,6 @@ static int rz1000_fifo_disable(struct pci_dev *pdev) | |||
85 | 85 | ||
86 | static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 86 | static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
87 | { | 87 | { |
88 | static int printed_version; | ||
89 | static const struct ata_port_info info = { | 88 | static const struct ata_port_info info = { |
90 | .flags = ATA_FLAG_SLAVE_POSS, | 89 | .flags = ATA_FLAG_SLAVE_POSS, |
91 | .pio_mask = ATA_PIO4, | 90 | .pio_mask = ATA_PIO4, |
@@ -93,8 +92,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en | |||
93 | }; | 92 | }; |
94 | const struct ata_port_info *ppi[] = { &info, NULL }; | 93 | const struct ata_port_info *ppi[] = { &info, NULL }; |
95 | 94 | ||
96 | if (!printed_version++) | 95 | printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); |
97 | printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); | ||
98 | 96 | ||
99 | if (rz1000_fifo_disable(pdev) == 0) | 97 | if (rz1000_fifo_disable(pdev) == 0) |
100 | return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL); | 98 | return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL); |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 94eaa432c40a..d344db42a002 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -1257,6 +1257,7 @@ static struct scsi_host_template sata_fsl_sht = { | |||
1257 | static struct ata_port_operations sata_fsl_ops = { | 1257 | static struct ata_port_operations sata_fsl_ops = { |
1258 | .inherits = &sata_pmp_port_ops, | 1258 | .inherits = &sata_pmp_port_ops, |
1259 | 1259 | ||
1260 | .qc_defer = ata_std_qc_defer, | ||
1260 | .qc_prep = sata_fsl_qc_prep, | 1261 | .qc_prep = sata_fsl_qc_prep, |
1261 | .qc_issue = sata_fsl_qc_issue, | 1262 | .qc_issue = sata_fsl_qc_issue, |
1262 | .qc_fill_rtf = sata_fsl_qc_fill_rtf, | 1263 | .qc_fill_rtf = sata_fsl_qc_fill_rtf, |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 8d890cc5a7ee..4406902b4293 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
@@ -405,7 +405,7 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance) | |||
405 | struct ata_host *host = dev_instance; | 405 | struct ata_host *host = dev_instance; |
406 | struct inic_host_priv *hpriv = host->private_data; | 406 | struct inic_host_priv *hpriv = host->private_data; |
407 | u16 host_irq_stat; | 407 | u16 host_irq_stat; |
408 | int i, handled = 0;; | 408 | int i, handled = 0; |
409 | 409 | ||
410 | host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT); | 410 | host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT); |
411 | 411 | ||
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index c19417e02208..17f9ff9067a2 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -4013,7 +4013,7 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
4013 | 4013 | ||
4014 | host->iomap = NULL; | 4014 | host->iomap = NULL; |
4015 | hpriv->base = devm_ioremap(&pdev->dev, res->start, | 4015 | hpriv->base = devm_ioremap(&pdev->dev, res->start, |
4016 | res->end - res->start + 1); | 4016 | resource_size(res)); |
4017 | hpriv->base -= SATAHC0_REG_BASE; | 4017 | hpriv->base -= SATAHC0_REG_BASE; |
4018 | 4018 | ||
4019 | /* | 4019 | /* |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index b2d11f300c39..86a40582999c 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -602,6 +602,7 @@ MODULE_VERSION(DRV_VERSION); | |||
602 | 602 | ||
603 | static int adma_enabled; | 603 | static int adma_enabled; |
604 | static int swncq_enabled = 1; | 604 | static int swncq_enabled = 1; |
605 | static int msi_enabled; | ||
605 | 606 | ||
606 | static void nv_adma_register_mode(struct ata_port *ap) | 607 | static void nv_adma_register_mode(struct ata_port *ap) |
607 | { | 608 | { |
@@ -2459,6 +2460,11 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2459 | } else if (type == SWNCQ) | 2460 | } else if (type == SWNCQ) |
2460 | nv_swncq_host_init(host); | 2461 | nv_swncq_host_init(host); |
2461 | 2462 | ||
2463 | if (msi_enabled) { | ||
2464 | dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n"); | ||
2465 | pci_enable_msi(pdev); | ||
2466 | } | ||
2467 | |||
2462 | pci_set_master(pdev); | 2468 | pci_set_master(pdev); |
2463 | return ata_host_activate(host, pdev->irq, ipriv->irq_handler, | 2469 | return ata_host_activate(host, pdev->irq, ipriv->irq_handler, |
2464 | IRQF_SHARED, ipriv->sht); | 2470 | IRQF_SHARED, ipriv->sht); |
@@ -2558,4 +2564,6 @@ module_param_named(adma, adma_enabled, bool, 0444); | |||
2558 | MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); | 2564 | MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); |
2559 | module_param_named(swncq, swncq_enabled, bool, 0444); | 2565 | module_param_named(swncq, swncq_enabled, bool, 0444); |
2560 | MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); | 2566 | MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); |
2567 | module_param_named(msi, msi_enabled, bool, 0444); | ||
2568 | MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)"); | ||
2561 | 2569 | ||
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 35bd5cc7f285..3cb69d5fb817 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -565,6 +565,19 @@ static void sil_freeze(struct ata_port *ap) | |||
565 | tmp |= SIL_MASK_IDE0_INT << ap->port_no; | 565 | tmp |= SIL_MASK_IDE0_INT << ap->port_no; |
566 | writel(tmp, mmio_base + SIL_SYSCFG); | 566 | writel(tmp, mmio_base + SIL_SYSCFG); |
567 | readl(mmio_base + SIL_SYSCFG); /* flush */ | 567 | readl(mmio_base + SIL_SYSCFG); /* flush */ |
568 | |||
569 | /* Ensure DMA_ENABLE is off. | ||
570 | * | ||
571 | * This is because the controller will not give us access to the | ||
572 | * taskfile registers while a DMA is in progress | ||
573 | */ | ||
574 | iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE, | ||
575 | ap->ioaddr.bmdma_addr); | ||
576 | |||
577 | /* According to ata_bmdma_stop, an HDMA transition requires | ||
578 | * one PIO cycle. But we can't read a taskfile register. | ||
579 | */ | ||
580 | ioread8(ap->ioaddr.bmdma_addr); | ||
568 | } | 581 | } |
569 | 582 | ||
570 | static void sil_thaw(struct ata_port *ap) | 583 | static void sil_thaw(struct ata_port *ap) |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 77aa8d7ecec4..e6946fc527d0 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -846,6 +846,17 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) | |||
846 | if (!ata_is_atapi(qc->tf.protocol)) { | 846 | if (!ata_is_atapi(qc->tf.protocol)) { |
847 | prb = &cb->ata.prb; | 847 | prb = &cb->ata.prb; |
848 | sge = cb->ata.sge; | 848 | sge = cb->ata.sge; |
849 | if (ata_is_data(qc->tf.protocol)) { | ||
850 | u16 prot = 0; | ||
851 | ctrl = PRB_CTRL_PROTOCOL; | ||
852 | if (ata_is_ncq(qc->tf.protocol)) | ||
853 | prot |= PRB_PROT_NCQ; | ||
854 | if (qc->tf.flags & ATA_TFLAG_WRITE) | ||
855 | prot |= PRB_PROT_WRITE; | ||
856 | else | ||
857 | prot |= PRB_PROT_READ; | ||
858 | prb->prot = cpu_to_le16(prot); | ||
859 | } | ||
849 | } else { | 860 | } else { |
850 | prb = &cb->atapi.prb; | 861 | prb = &cb->atapi.prb; |
851 | sge = cb->atapi.sge; | 862 | sge = cb->atapi.sge; |
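Note: a standalone model (illustration only; the flag values below are made up, only the decision logic mirrors the sil24_qc_prep() hunk above) of how the PRB protocol word is derived for data commands:

/* toy derivation of the sil24 PRB protocol word */
#include <stdint.h>
#include <stdio.h>

#define TOY_PROT_NCQ	(1 << 0)
#define TOY_PROT_READ	(1 << 1)
#define TOY_PROT_WRITE	(1 << 2)

static uint16_t toy_prb_prot(int is_ncq, int is_write)
{
	uint16_t prot = 0;

	if (is_ncq)
		prot |= TOY_PROT_NCQ;
	prot |= is_write ? TOY_PROT_WRITE : TOY_PROT_READ;
	return prot;
}

int main(void)
{
	printf("NCQ write:     %#x\n", toy_prb_prot(1, 1));
	printf("non-NCQ read:  %#x\n", toy_prb_prot(0, 0));
	return 0;
}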
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 8f9833228619..f8a91bfd66a8 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -109,8 +109,9 @@ MODULE_LICENSE("GPL"); | |||
109 | MODULE_DEVICE_TABLE(pci, sis_pci_tbl); | 109 | MODULE_DEVICE_TABLE(pci, sis_pci_tbl); |
110 | MODULE_VERSION(DRV_VERSION); | 110 | MODULE_VERSION(DRV_VERSION); |
111 | 111 | ||
112 | static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) | 112 | static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg) |
113 | { | 113 | { |
114 | struct ata_port *ap = link->ap; | ||
114 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 115 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
115 | unsigned int addr = SIS_SCR_BASE + (4 * sc_reg); | 116 | unsigned int addr = SIS_SCR_BASE + (4 * sc_reg); |
116 | u8 pmr; | 117 | u8 pmr; |
@@ -131,6 +132,9 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) | |||
131 | break; | 132 | break; |
132 | } | 133 | } |
133 | } | 134 | } |
135 | if (link->pmp) | ||
136 | addr += 0x10; | ||
137 | |||
134 | return addr; | 138 | return addr; |
135 | } | 139 | } |
136 | 140 | ||
@@ -138,24 +142,12 @@ static u32 sis_scr_cfg_read(struct ata_link *link, | |||
138 | unsigned int sc_reg, u32 *val) | 142 | unsigned int sc_reg, u32 *val) |
139 | { | 143 | { |
140 | struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); | 144 | struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); |
141 | unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); | 145 | unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg); |
142 | u32 val2 = 0; | ||
143 | u8 pmr; | ||
144 | 146 | ||
145 | if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ | 147 | if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ |
146 | return -EINVAL; | 148 | return -EINVAL; |
147 | 149 | ||
148 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | ||
149 | |||
150 | pci_read_config_dword(pdev, cfg_addr, val); | 150 | pci_read_config_dword(pdev, cfg_addr, val); |
151 | |||
152 | if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || | ||
153 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) | ||
154 | pci_read_config_dword(pdev, cfg_addr+0x10, &val2); | ||
155 | |||
156 | *val |= val2; | ||
157 | *val &= 0xfffffffb; /* avoid problems with powerdowned ports */ | ||
158 | |||
159 | return 0; | 151 | return 0; |
160 | } | 152 | } |
161 | 153 | ||
@@ -163,28 +155,16 @@ static int sis_scr_cfg_write(struct ata_link *link, | |||
163 | unsigned int sc_reg, u32 val) | 155 | unsigned int sc_reg, u32 val) |
164 | { | 156 | { |
165 | struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); | 157 | struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); |
166 | unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); | 158 | unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg); |
167 | u8 pmr; | ||
168 | |||
169 | if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ | ||
170 | return -EINVAL; | ||
171 | |||
172 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | ||
173 | 159 | ||
174 | pci_write_config_dword(pdev, cfg_addr, val); | 160 | pci_write_config_dword(pdev, cfg_addr, val); |
175 | |||
176 | if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || | ||
177 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) | ||
178 | pci_write_config_dword(pdev, cfg_addr+0x10, val); | ||
179 | |||
180 | return 0; | 161 | return 0; |
181 | } | 162 | } |
182 | 163 | ||
183 | static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) | 164 | static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) |
184 | { | 165 | { |
185 | struct ata_port *ap = link->ap; | 166 | struct ata_port *ap = link->ap; |
186 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 167 | void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10; |
187 | u8 pmr; | ||
188 | 168 | ||
189 | if (sc_reg > SCR_CONTROL) | 169 | if (sc_reg > SCR_CONTROL) |
190 | return -EINVAL; | 170 | return -EINVAL; |
@@ -192,39 +172,23 @@ static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) | |||
192 | if (ap->flags & SIS_FLAG_CFGSCR) | 172 | if (ap->flags & SIS_FLAG_CFGSCR) |
193 | return sis_scr_cfg_read(link, sc_reg, val); | 173 | return sis_scr_cfg_read(link, sc_reg, val); |
194 | 174 | ||
195 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | 175 | *val = ioread32(base + sc_reg * 4); |
196 | |||
197 | *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); | ||
198 | |||
199 | if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || | ||
200 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) | ||
201 | *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); | ||
202 | |||
203 | *val &= 0xfffffffb; | ||
204 | |||
205 | return 0; | 176 | return 0; |
206 | } | 177 | } |
207 | 178 | ||
208 | static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) | 179 | static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) |
209 | { | 180 | { |
210 | struct ata_port *ap = link->ap; | 181 | struct ata_port *ap = link->ap; |
211 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 182 | void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10; |
212 | u8 pmr; | ||
213 | 183 | ||
214 | if (sc_reg > SCR_CONTROL) | 184 | if (sc_reg > SCR_CONTROL) |
215 | return -EINVAL; | 185 | return -EINVAL; |
216 | 186 | ||
217 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | ||
218 | |||
219 | if (ap->flags & SIS_FLAG_CFGSCR) | 187 | if (ap->flags & SIS_FLAG_CFGSCR) |
220 | return sis_scr_cfg_write(link, sc_reg, val); | 188 | return sis_scr_cfg_write(link, sc_reg, val); |
221 | else { | 189 | |
222 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); | 190 | iowrite32(val, base + (sc_reg * 4)); |
223 | if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || | 191 | return 0; |
224 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) | ||
225 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10); | ||
226 | return 0; | ||
227 | } | ||
228 | } | 192 | } |
229 | 193 | ||
230 | static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 194 | static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
@@ -236,7 +200,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
236 | u32 genctl, val; | 200 | u32 genctl, val; |
237 | u8 pmr; | 201 | u8 pmr; |
238 | u8 port2_start = 0x20; | 202 | u8 port2_start = 0x20; |
239 | int rc; | 203 | int i, rc; |
240 | 204 | ||
241 | if (!printed_version++) | 205 | if (!printed_version++) |
242 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | 206 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -319,6 +283,17 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
319 | if (rc) | 283 | if (rc) |
320 | return rc; | 284 | return rc; |
321 | 285 | ||
286 | for (i = 0; i < 2; i++) { | ||
287 | struct ata_port *ap = host->ports[i]; | ||
288 | |||
289 | if (ap->flags & ATA_FLAG_SATA && | ||
290 | ap->flags & ATA_FLAG_SLAVE_POSS) { | ||
291 | rc = ata_slave_link_init(ap); | ||
292 | if (rc) | ||
293 | return rc; | ||
294 | } | ||
295 | } | ||
296 | |||
322 | if (!(pi.flags & SIS_FLAG_CFGSCR)) { | 297 | if (!(pi.flags & SIS_FLAG_CFGSCR)) { |
323 | void __iomem *mmio; | 298 | void __iomem *mmio; |
324 | 299 | ||
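With the slave-link conversion above, sata_sis no longer pokes the second SCR bank explicitly: the per-link base folds the offset in via link->pmp. A minimal sketch of the idea, assuming (as libata's ata_slave_link_init() does) that the slave link is initialised with pmp == 1 so it selects the +0x10 register bank the old code addressed by hand:

/* Illustrative sketch, not the driver's code. */
static void __iomem *sis_scr_base(struct ata_link *link, unsigned int sc_reg)
{
	return link->ap->ioaddr.scr_addr + link->pmp * 0x10 + sc_reg * 4;
}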
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 81cb01bfc356..456594bd97bc 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -483,9 +483,6 @@ int platform_driver_register(struct platform_driver *drv) | |||
483 | drv->driver.remove = platform_drv_remove; | 483 | drv->driver.remove = platform_drv_remove; |
484 | if (drv->shutdown) | 484 | if (drv->shutdown) |
485 | drv->driver.shutdown = platform_drv_shutdown; | 485 | drv->driver.shutdown = platform_drv_shutdown; |
486 | if (drv->suspend || drv->resume) | ||
487 | pr_warning("Platform driver '%s' needs updating - please use " | ||
488 | "dev_pm_ops\n", drv->driver.name); | ||
489 | 486 | ||
490 | return driver_register(&drv->driver); | 487 | return driver_register(&drv->driver); |
491 | } | 488 | } |
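The removed warning used to nag platform drivers still carrying legacy suspend/resume hooks toward struct dev_pm_ops. A minimal sketch of that conversion, with hypothetical foo_* callbacks (the sh_cmt change later in this section applies the same pattern for real):

#include <linux/platform_device.h>
#include <linux/pm.h>

/* All foo_* names are placeholders for a driver's own callbacks. */
static int foo_probe(struct platform_device *pdev) { return 0; }
static int foo_remove(struct platform_device *pdev) { return 0; }
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.remove = foo_remove,
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,	/* instead of legacy drv->suspend/resume */
	},
};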
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 5e41e6dd657b..db195abad698 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h | |||
@@ -155,7 +155,7 @@ struct aoedev { | |||
155 | u16 fw_ver; /* version of blade's firmware */ | 155 | u16 fw_ver; /* version of blade's firmware */ |
156 | struct work_struct work;/* disk create work struct */ | 156 | struct work_struct work;/* disk create work struct */ |
157 | struct gendisk *gd; | 157 | struct gendisk *gd; |
158 | struct request_queue blkq; | 158 | struct request_queue *blkq; |
159 | struct hd_geometry geo; | 159 | struct hd_geometry geo; |
160 | sector_t ssize; | 160 | sector_t ssize; |
161 | struct timer_list timer; | 161 | struct timer_list timer; |
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 2307a271bdc9..95d344971eda 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c | |||
@@ -264,9 +264,13 @@ aoeblk_gdalloc(void *vp) | |||
264 | goto err_disk; | 264 | goto err_disk; |
265 | } | 265 | } |
266 | 266 | ||
267 | blk_queue_make_request(&d->blkq, aoeblk_make_request); | 267 | d->blkq = blk_alloc_queue(GFP_KERNEL); |
268 | if (bdi_init(&d->blkq.backing_dev_info)) | 268 | if (!d->blkq) |
269 | goto err_mempool; | 269 | goto err_mempool; |
270 | blk_queue_make_request(d->blkq, aoeblk_make_request); | ||
271 | d->blkq->backing_dev_info.name = "aoe"; | ||
272 | if (bdi_init(&d->blkq->backing_dev_info)) | ||
273 | goto err_blkq; | ||
270 | spin_lock_irqsave(&d->lock, flags); | 274 | spin_lock_irqsave(&d->lock, flags); |
271 | gd->major = AOE_MAJOR; | 275 | gd->major = AOE_MAJOR; |
272 | gd->first_minor = d->sysminor * AOE_PARTITIONS; | 276 | gd->first_minor = d->sysminor * AOE_PARTITIONS; |
@@ -276,7 +280,7 @@ aoeblk_gdalloc(void *vp) | |||
276 | snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", | 280 | snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", |
277 | d->aoemajor, d->aoeminor); | 281 | d->aoemajor, d->aoeminor); |
278 | 282 | ||
279 | gd->queue = &d->blkq; | 283 | gd->queue = d->blkq; |
280 | d->gd = gd; | 284 | d->gd = gd; |
281 | d->flags &= ~DEVFL_GDALLOC; | 285 | d->flags &= ~DEVFL_GDALLOC; |
282 | d->flags |= DEVFL_UP; | 286 | d->flags |= DEVFL_UP; |
@@ -287,6 +291,9 @@ aoeblk_gdalloc(void *vp) | |||
287 | aoedisk_add_sysfs(d); | 291 | aoedisk_add_sysfs(d); |
288 | return; | 292 | return; |
289 | 293 | ||
294 | err_blkq: | ||
295 | blk_cleanup_queue(d->blkq); | ||
296 | d->blkq = NULL; | ||
290 | err_mempool: | 297 | err_mempool: |
291 | mempool_destroy(d->bufpool); | 298 | mempool_destroy(d->bufpool); |
292 | err_disk: | 299 | err_disk: |
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index eeea477d9601..fa67027789aa 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d) | |||
113 | if (d->bufpool) | 113 | if (d->bufpool) |
114 | mempool_destroy(d->bufpool); | 114 | mempool_destroy(d->bufpool); |
115 | skbpoolfree(d); | 115 | skbpoolfree(d); |
116 | blk_cleanup_queue(d->blkq); | ||
116 | kfree(d); | 117 | kfree(d); |
117 | } | 118 | } |
118 | 119 | ||
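The aoe hunks above replace the embedded request_queue with one allocated by blk_alloc_queue() and released with blk_cleanup_queue(), both on the error path and in aoedev_freedev(). A condensed sketch of the resulting pattern; apart from the block-layer and bdi calls, every name here is a placeholder:

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/backing-dev.h>

struct exdev {
	struct request_queue *blkq;
	struct gendisk *gd;
};

/* Placeholder make_request handler (int-returning signature of this era). */
static int ex_make_request(struct request_queue *q, struct bio *bio)
{
	bio_endio(bio, 0);
	return 0;
}

static int exdev_init_queue(struct exdev *d)
{
	d->blkq = blk_alloc_queue(GFP_KERNEL);		/* was: embedded struct */
	if (!d->blkq)
		return -ENOMEM;
	blk_queue_make_request(d->blkq, ex_make_request);
	d->blkq->backing_dev_info.name = "exdev";
	if (bdi_init(&d->blkq->backing_dev_info)) {
		blk_cleanup_queue(d->blkq);
		d->blkq = NULL;
		return -ENOMEM;
	}
	d->gd->queue = d->blkq;
	return 0;
}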
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 8c9d50db5c3a..c58557790585 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 | 49 | #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 |
50 | #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 | 50 | #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 |
51 | #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 | 51 | #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 |
52 | #define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062 | ||
52 | #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 | 53 | #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 |
53 | 54 | ||
54 | /* cover 915 and 945 variants */ | 55 | /* cover 915 and 945 variants */ |
@@ -81,7 +82,8 @@ | |||
81 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | 82 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ |
82 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | 83 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ |
83 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ | 84 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ |
84 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB) | 85 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ |
86 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB) | ||
85 | 87 | ||
86 | extern int agp_memory_reserved; | 88 | extern int agp_memory_reserved; |
87 | 89 | ||
@@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | |||
1216 | case PCI_DEVICE_ID_INTEL_G41_HB: | 1218 | case PCI_DEVICE_ID_INTEL_G41_HB: |
1217 | case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: | 1219 | case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: |
1218 | case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: | 1220 | case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: |
1221 | case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: | ||
1219 | *gtt_offset = *gtt_size = MB(2); | 1222 | *gtt_offset = *gtt_size = MB(2); |
1220 | break; | 1223 | break; |
1221 | default: | 1224 | default: |
@@ -2195,6 +2198,8 @@ static const struct intel_driver_description { | |||
2195 | "IGDNG/D", NULL, &intel_i965_driver }, | 2198 | "IGDNG/D", NULL, &intel_i965_driver }, |
2196 | { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, | 2199 | { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, |
2197 | "IGDNG/M", NULL, &intel_i965_driver }, | 2200 | "IGDNG/M", NULL, &intel_i965_driver }, |
2201 | { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, | ||
2202 | "IGDNG/MA", NULL, &intel_i965_driver }, | ||
2198 | { 0, 0, 0, NULL, NULL, NULL } | 2203 | { 0, 0, 0, NULL, NULL, NULL } |
2199 | }; | 2204 | }; |
2200 | 2205 | ||
@@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2398 | ID(PCI_DEVICE_ID_INTEL_G41_HB), | 2403 | ID(PCI_DEVICE_ID_INTEL_G41_HB), |
2399 | ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), | 2404 | ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), |
2400 | ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), | 2405 | ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), |
2406 | ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), | ||
2401 | { } | 2407 | { } |
2402 | }; | 2408 | }; |
2403 | 2409 | ||
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index 86105efb4eb6..0ecac7e532f6 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c | |||
@@ -1006,7 +1006,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console) | |||
1006 | priv->dev->release = (void (*)(struct device *)) kfree; | 1006 | priv->dev->release = (void (*)(struct device *)) kfree; |
1007 | rc = device_register(priv->dev); | 1007 | rc = device_register(priv->dev); |
1008 | if (rc) { | 1008 | if (rc) { |
1009 | kfree(priv->dev); | 1009 | put_device(priv->dev); |
1010 | goto out_error_dev; | 1010 | goto out_error_dev; |
1011 | } | 1011 | } |
1012 | 1012 | ||
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index cd0ba51f7c80..0d8c5788b8e4 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c | |||
@@ -44,8 +44,8 @@ | |||
44 | * want to register another driver on the same PCI id. | 44 | * want to register another driver on the same PCI id. |
45 | */ | 45 | */ |
46 | static const struct pci_device_id pci_tbl[] = { | 46 | static const struct pci_device_id pci_tbl[] = { |
47 | { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 47 | { PCI_VDEVICE(AMD, 0x7443), 0, }, |
48 | { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 48 | { PCI_VDEVICE(AMD, 0x746b), 0, }, |
49 | { 0, }, /* terminate list */ | 49 | { 0, }, /* terminate list */ |
50 | }; | 50 | }; |
51 | MODULE_DEVICE_TABLE(pci, pci_tbl); | 51 | MODULE_DEVICE_TABLE(pci, pci_tbl); |
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 64d513f68368..4c4d4e140f98 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c | |||
@@ -46,8 +46,7 @@ | |||
46 | * want to register another driver on the same PCI id. | 46 | * want to register another driver on the same PCI id. |
47 | */ | 47 | */ |
48 | static const struct pci_device_id pci_tbl[] = { | 48 | static const struct pci_device_id pci_tbl[] = { |
49 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, | 49 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, }, |
50 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | ||
51 | { 0, }, /* terminate list */ | 50 | { 0, }, /* terminate list */ |
52 | }; | 51 | }; |
53 | MODULE_DEVICE_TABLE(pci, pci_tbl); | 52 | MODULE_DEVICE_TABLE(pci, pci_tbl); |
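Both hw_random ID tables above move to the PCI_VDEVICE() helper, which supplies the vendor/device pair and wildcards the subsystem IDs so each entry stays on one line. Roughly paraphrasing the helper from include/linux/pci.h:

/* PCI_VDEVICE(AMD, 0x7443) expands to the same fields as the old
 * open-coded { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0 } entry. */
#define PCI_VDEVICE(vendor, device)		\
	PCI_VENDOR_ID_##vendor, (device),	\
	PCI_ANY_ID, PCI_ANY_ID, 0, 0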
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index afa8813e737a..645237bda682 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -822,6 +822,7 @@ static const struct file_operations zero_fops = { | |||
822 | * - permits private mappings, "copies" are taken of the source of zeros | 822 | * - permits private mappings, "copies" are taken of the source of zeros |
823 | */ | 823 | */ |
824 | static struct backing_dev_info zero_bdi = { | 824 | static struct backing_dev_info zero_bdi = { |
825 | .name = "char/mem", | ||
825 | .capabilities = BDI_CAP_MAP_COPY, | 826 | .capabilities = BDI_CAP_MAP_COPY, |
826 | }; | 827 | }; |
827 | 828 | ||
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 973be2f44195..4e28b35024ec 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space) | |||
300 | if (space < 2) | 300 | if (space < 2) |
301 | return -1; | 301 | return -1; |
302 | tty->canon_column = tty->column = 0; | 302 | tty->canon_column = tty->column = 0; |
303 | tty_put_char(tty, '\r'); | 303 | tty->ops->write(tty, "\r\n", 2); |
304 | tty_put_char(tty, c); | ||
305 | return 2; | 304 | return 2; |
306 | } | 305 | } |
307 | tty->canon_column = tty->column; | 306 | tty->canon_column = tty->column; |
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 6e6942c45f5b..b33d6688e910 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to) | |||
109 | * the other side of the pty/tty pair. | 109 | * the other side of the pty/tty pair. |
110 | */ | 110 | */ |
111 | 111 | ||
112 | static int pty_write(struct tty_struct *tty, const unsigned char *buf, | 112 | static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) |
113 | int count) | ||
114 | { | 113 | { |
115 | struct tty_struct *to = tty->link; | 114 | struct tty_struct *to = tty->link; |
116 | int c; | ||
117 | 115 | ||
118 | if (tty->stopped) | 116 | if (tty->stopped) |
119 | return 0; | 117 | return 0; |
120 | 118 | ||
121 | /* This isn't locked but our 8K is quite sloppy so no | ||
122 | big deal */ | ||
123 | |||
124 | c = pty_space(to); | ||
125 | if (c > count) | ||
126 | c = count; | ||
127 | if (c > 0) { | 119 | if (c > 0) { |
128 | /* Stuff the data into the input queue of the other end */ | 120 | /* Stuff the data into the input queue of the other end */ |
129 | c = tty_insert_flip_string(to, buf, c); | 121 | c = tty_insert_flip_string(to, buf, c); |
@@ -144,6 +136,8 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, | |||
144 | 136 | ||
145 | static int pty_write_room(struct tty_struct *tty) | 137 | static int pty_write_room(struct tty_struct *tty) |
146 | { | 138 | { |
139 | if (tty->stopped) | ||
140 | return 0; | ||
147 | return pty_space(tty->link); | 141 | return pty_space(tty->link); |
148 | } | 142 | } |
149 | 143 | ||
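Since pty_write() no longer re-clamps against pty_space(), the change leans on callers first asking ->write_room() (which now reports 0 while the pty is stopped) and passing at most that many bytes to ->write(). A hedged caller-side sketch; send_clamped() is an invented name, only the two tty_operations calls are real:

static int send_clamped(struct tty_struct *tty, const unsigned char *buf, int count)
{
	int room = tty->ops->write_room(tty);	/* 0 when the pty is stopped */

	if (count > room)
		count = room;
	if (count <= 0)
		return 0;
	return tty->ops->write(tty, buf, count);
}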
diff --git a/drivers/char/random.c b/drivers/char/random.c index 8c7444857a4b..d8a9255e1a3f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -240,6 +240,7 @@ | |||
240 | #include <linux/spinlock.h> | 240 | #include <linux/spinlock.h> |
241 | #include <linux/percpu.h> | 241 | #include <linux/percpu.h> |
242 | #include <linux/cryptohash.h> | 242 | #include <linux/cryptohash.h> |
243 | #include <linux/fips.h> | ||
243 | 244 | ||
244 | #ifdef CONFIG_GENERIC_HARDIRQS | 245 | #ifdef CONFIG_GENERIC_HARDIRQS |
245 | # include <linux/irq.h> | 246 | # include <linux/irq.h> |
@@ -413,6 +414,7 @@ struct entropy_store { | |||
413 | unsigned add_ptr; | 414 | unsigned add_ptr; |
414 | int entropy_count; | 415 | int entropy_count; |
415 | int input_rotate; | 416 | int input_rotate; |
417 | __u8 *last_data; | ||
416 | }; | 418 | }; |
417 | 419 | ||
418 | static __u32 input_pool_data[INPUT_POOL_WORDS]; | 420 | static __u32 input_pool_data[INPUT_POOL_WORDS]; |
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, | |||
852 | { | 854 | { |
853 | ssize_t ret = 0, i; | 855 | ssize_t ret = 0, i; |
854 | __u8 tmp[EXTRACT_SIZE]; | 856 | __u8 tmp[EXTRACT_SIZE]; |
857 | unsigned long flags; | ||
855 | 858 | ||
856 | xfer_secondary_pool(r, nbytes); | 859 | xfer_secondary_pool(r, nbytes); |
857 | nbytes = account(r, nbytes, min, reserved); | 860 | nbytes = account(r, nbytes, min, reserved); |
858 | 861 | ||
859 | while (nbytes) { | 862 | while (nbytes) { |
860 | extract_buf(r, tmp); | 863 | extract_buf(r, tmp); |
864 | |||
865 | if (r->last_data) { | ||
866 | spin_lock_irqsave(&r->lock, flags); | ||
867 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) | ||
868 | panic("Hardware RNG duplicated output!\n"); | ||
869 | memcpy(r->last_data, tmp, EXTRACT_SIZE); | ||
870 | spin_unlock_irqrestore(&r->lock, flags); | ||
871 | } | ||
861 | i = min_t(int, nbytes, EXTRACT_SIZE); | 872 | i = min_t(int, nbytes, EXTRACT_SIZE); |
862 | memcpy(buf, tmp, i); | 873 | memcpy(buf, tmp, i); |
863 | nbytes -= i; | 874 | nbytes -= i; |
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r) | |||
940 | now = ktime_get_real(); | 951 | now = ktime_get_real(); |
941 | mix_pool_bytes(r, &now, sizeof(now)); | 952 | mix_pool_bytes(r, &now, sizeof(now)); |
942 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); | 953 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); |
954 | /* Enable continuous test in fips mode */ | ||
955 | if (fips_enabled) | ||
956 | r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL); | ||
943 | } | 957 | } |
944 | 958 | ||
945 | static int rand_initialize(void) | 959 | static int rand_initialize(void) |
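The random.c hunks add a FIPS continuous test: when fips_enabled, a copy of the previous EXTRACT_SIZE output block is kept and each new block is compared against it, with a repeat treated as a hardware RNG failure. A standalone user-space sketch of the same check (the names and the abort() policy are illustrative; EXTRACT_SIZE mirrors the kernel's 10-byte extraction unit):

#include <stdlib.h>
#include <string.h>

#define EXTRACT_SIZE 10

static unsigned char last_block[EXTRACT_SIZE];
static int have_last;

static void continuous_test(const unsigned char *block)
{
	/* A repeated block from a hardware RNG is treated as fatal. */
	if (have_last && memcmp(block, last_block, EXTRACT_SIZE) == 0)
		abort();
	memcpy(last_block, block, EXTRACT_SIZE);
	have_last = 1;
}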
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 5d7a02f63e1c..50eecfe1d724 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/sysrq.h> | 24 | #include <linux/sysrq.h> |
25 | #include <linux/kbd_kern.h> | 25 | #include <linux/kbd_kern.h> |
26 | #include <linux/proc_fs.h> | 26 | #include <linux/proc_fs.h> |
27 | #include <linux/nmi.h> | ||
27 | #include <linux/quotaops.h> | 28 | #include <linux/quotaops.h> |
28 | #include <linux/perf_counter.h> | 29 | #include <linux/perf_counter.h> |
29 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
@@ -222,12 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); | |||
222 | 223 | ||
223 | static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) | 224 | static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) |
224 | { | 225 | { |
225 | struct pt_regs *regs = get_irq_regs(); | 226 | /* |
226 | if (regs) { | 227 | * Fall back to the workqueue based printing if the |
227 | printk(KERN_INFO "CPU%d:\n", smp_processor_id()); | 228 | * backtrace printing did not succeed or the |
228 | show_regs(regs); | 229 | * architecture has no support for it: |
230 | */ | ||
231 | if (!trigger_all_cpu_backtrace()) { | ||
232 | struct pt_regs *regs = get_irq_regs(); | ||
233 | |||
234 | if (regs) { | ||
235 | printk(KERN_INFO "CPU%d:\n", smp_processor_id()); | ||
236 | show_regs(regs); | ||
237 | } | ||
238 | schedule_work(&sysrq_showallcpus); | ||
229 | } | 239 | } |
230 | schedule_work(&sysrq_showallcpus); | ||
231 | } | 240 | } |
232 | 241 | ||
233 | static struct sysrq_key_op sysrq_showallcpus_op = { | 242 | static struct sysrq_key_op sysrq_showallcpus_op = { |
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index aec1931608aa..0b73e4ec1add 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, | |||
450 | goto out_err; | 450 | goto out_err; |
451 | } | 451 | } |
452 | 452 | ||
453 | /* Default timeouts */ | ||
454 | chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | ||
455 | chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); | ||
456 | chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | ||
457 | chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | ||
458 | |||
453 | if (request_locality(chip, 0) != 0) { | 459 | if (request_locality(chip, 0) != 0) { |
454 | rc = -ENODEV; | 460 | rc = -ENODEV; |
455 | goto out_err; | 461 | goto out_err; |
@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, | |||
457 | 463 | ||
458 | vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); | 464 | vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); |
459 | 465 | ||
460 | /* Default timeouts */ | ||
461 | chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | ||
462 | chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); | ||
463 | chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | ||
464 | chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | ||
465 | |||
466 | dev_info(dev, | 466 | dev_info(dev, |
467 | "1.2 TPM (device-id 0x%X, rev-id %d)\n", | 467 | "1.2 TPM (device-id 0x%X, rev-id %d)\n", |
468 | vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); | 468 | vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); |
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index 1733d3439ad2..e48af9f79219 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c | |||
@@ -508,8 +508,9 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) | |||
508 | * be obtained while the delayed work queue halt ensures that no more | 508 | * be obtained while the delayed work queue halt ensures that no more |
509 | * data is fed to the ldisc. | 509 | * data is fed to the ldisc. |
510 | * | 510 | * |
511 | * In order to wait for any existing references to complete see | 511 | * You need to do a 'flush_scheduled_work()' (outside the ldisc_mutex) |
512 | * tty_ldisc_wait_idle. | 512 | * in order to make sure any currently executing ldisc work is also |
513 | * flushed. | ||
513 | */ | 514 | */ |
514 | 515 | ||
515 | static int tty_ldisc_halt(struct tty_struct *tty) | 516 | static int tty_ldisc_halt(struct tty_struct *tty) |
@@ -753,11 +754,14 @@ void tty_ldisc_hangup(struct tty_struct *tty) | |||
753 | * N_TTY. | 754 | * N_TTY. |
754 | */ | 755 | */ |
755 | if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { | 756 | if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { |
757 | /* Make sure the old ldisc is quiescent */ | ||
758 | tty_ldisc_halt(tty); | ||
759 | flush_scheduled_work(); | ||
760 | |||
756 | /* Avoid racing set_ldisc or tty_ldisc_release */ | 761 | /* Avoid racing set_ldisc or tty_ldisc_release */ |
757 | mutex_lock(&tty->ldisc_mutex); | 762 | mutex_lock(&tty->ldisc_mutex); |
758 | if (tty->ldisc) { /* Not yet closed */ | 763 | if (tty->ldisc) { /* Not yet closed */ |
759 | /* Switch back to N_TTY */ | 764 | /* Switch back to N_TTY */ |
760 | tty_ldisc_halt(tty); | ||
761 | tty_ldisc_reinit(tty); | 765 | tty_ldisc_reinit(tty); |
762 | /* At this point we have a closed ldisc and we want to | 766 | /* At this point we have a closed ldisc and we want to |
763 | reopen it. We could defer this to the next open but | 767 | reopen it. We could defer this to the next open but |
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 2964f5f4a7ef..6b3e0c2f33e2 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -40,6 +40,7 @@ struct sh_cmt_priv { | |||
40 | struct platform_device *pdev; | 40 | struct platform_device *pdev; |
41 | 41 | ||
42 | unsigned long flags; | 42 | unsigned long flags; |
43 | unsigned long flags_suspend; | ||
43 | unsigned long match_value; | 44 | unsigned long match_value; |
44 | unsigned long next_match_value; | 45 | unsigned long next_match_value; |
45 | unsigned long max_match_value; | 46 | unsigned long max_match_value; |
@@ -667,11 +668,38 @@ static int __devexit sh_cmt_remove(struct platform_device *pdev) | |||
667 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | 668 | return -EBUSY; /* cannot unregister clockevent and clocksource */ |
668 | } | 669 | } |
669 | 670 | ||
671 | static int sh_cmt_suspend(struct device *dev) | ||
672 | { | ||
673 | struct platform_device *pdev = to_platform_device(dev); | ||
674 | struct sh_cmt_priv *p = platform_get_drvdata(pdev); | ||
675 | |||
676 | /* save flag state and stop CMT channel */ | ||
677 | p->flags_suspend = p->flags; | ||
678 | sh_cmt_stop(p, p->flags); | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int sh_cmt_resume(struct device *dev) | ||
683 | { | ||
684 | struct platform_device *pdev = to_platform_device(dev); | ||
685 | struct sh_cmt_priv *p = platform_get_drvdata(pdev); | ||
686 | |||
687 | /* start CMT channel from saved state */ | ||
688 | sh_cmt_start(p, p->flags_suspend); | ||
689 | return 0; | ||
690 | } | ||
691 | |||
692 | static struct dev_pm_ops sh_cmt_dev_pm_ops = { | ||
693 | .suspend = sh_cmt_suspend, | ||
694 | .resume = sh_cmt_resume, | ||
695 | }; | ||
696 | |||
670 | static struct platform_driver sh_cmt_device_driver = { | 697 | static struct platform_driver sh_cmt_device_driver = { |
671 | .probe = sh_cmt_probe, | 698 | .probe = sh_cmt_probe, |
672 | .remove = __devexit_p(sh_cmt_remove), | 699 | .remove = __devexit_p(sh_cmt_remove), |
673 | .driver = { | 700 | .driver = { |
674 | .name = "sh_cmt", | 701 | .name = "sh_cmt", |
702 | .pm = &sh_cmt_dev_pm_ops, | ||
675 | } | 703 | } |
676 | }; | 704 | }; |
677 | 705 | ||
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fd69086d08d5..2968ed6a9c49 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) | |||
1250 | { | 1250 | { |
1251 | int ret = 0; | 1251 | int ret = 0; |
1252 | 1252 | ||
1253 | #ifdef __powerpc__ | ||
1254 | int cpu = sysdev->id; | 1253 | int cpu = sysdev->id; |
1255 | unsigned int cur_freq = 0; | ||
1256 | struct cpufreq_policy *cpu_policy; | 1254 | struct cpufreq_policy *cpu_policy; |
1257 | 1255 | ||
1258 | dprintk("suspending cpu %u\n", cpu); | 1256 | dprintk("suspending cpu %u\n", cpu); |
1259 | 1257 | ||
1260 | /* | ||
1261 | * This whole bogosity is here because Powerbooks are made of fail. | ||
1262 | * No sane platform should need any of the code below to be run. | ||
1263 | * (it's entirely the wrong thing to do, as driver->get may | ||
1264 | * reenable interrupts on some architectures). | ||
1265 | */ | ||
1266 | |||
1267 | if (!cpu_online(cpu)) | 1258 | if (!cpu_online(cpu)) |
1268 | return 0; | 1259 | return 0; |
1269 | 1260 | ||
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) | |||
1282 | 1273 | ||
1283 | if (cpufreq_driver->suspend) { | 1274 | if (cpufreq_driver->suspend) { |
1284 | ret = cpufreq_driver->suspend(cpu_policy, pmsg); | 1275 | ret = cpufreq_driver->suspend(cpu_policy, pmsg); |
1285 | if (ret) { | 1276 | if (ret) |
1286 | printk(KERN_ERR "cpufreq: suspend failed in ->suspend " | 1277 | printk(KERN_ERR "cpufreq: suspend failed in ->suspend " |
1287 | "step on CPU %u\n", cpu_policy->cpu); | 1278 | "step on CPU %u\n", cpu_policy->cpu); |
1288 | goto out; | ||
1289 | } | ||
1290 | } | ||
1291 | |||
1292 | if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS) | ||
1293 | goto out; | ||
1294 | |||
1295 | if (cpufreq_driver->get) | ||
1296 | cur_freq = cpufreq_driver->get(cpu_policy->cpu); | ||
1297 | |||
1298 | if (!cur_freq || !cpu_policy->cur) { | ||
1299 | printk(KERN_ERR "cpufreq: suspend failed to assert current " | ||
1300 | "frequency is what timing core thinks it is.\n"); | ||
1301 | goto out; | ||
1302 | } | ||
1303 | |||
1304 | if (unlikely(cur_freq != cpu_policy->cur)) { | ||
1305 | struct cpufreq_freqs freqs; | ||
1306 | |||
1307 | if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN)) | ||
1308 | dprintk("Warning: CPU frequency is %u, " | ||
1309 | "cpufreq assumed %u kHz.\n", | ||
1310 | cur_freq, cpu_policy->cur); | ||
1311 | |||
1312 | freqs.cpu = cpu; | ||
1313 | freqs.old = cpu_policy->cur; | ||
1314 | freqs.new = cur_freq; | ||
1315 | |||
1316 | srcu_notifier_call_chain(&cpufreq_transition_notifier_list, | ||
1317 | CPUFREQ_SUSPENDCHANGE, &freqs); | ||
1318 | adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs); | ||
1319 | |||
1320 | cpu_policy->cur = cur_freq; | ||
1321 | } | 1279 | } |
1322 | 1280 | ||
1323 | out: | 1281 | out: |
1324 | cpufreq_cpu_put(cpu_policy); | 1282 | cpufreq_cpu_put(cpu_policy); |
1325 | #endif /* __powerpc__ */ | ||
1326 | return ret; | 1283 | return ret; |
1327 | } | 1284 | } |
1328 | 1285 | ||
@@ -1330,24 +1287,21 @@ out: | |||
1330 | * cpufreq_resume - restore proper CPU frequency handling after resume | 1287 | * cpufreq_resume - restore proper CPU frequency handling after resume |
1331 | * | 1288 | * |
1332 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) | 1289 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) |
1333 | * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync | 1290 | * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are |
1334 | * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are | 1291 | * restored. It will verify that the current freq is in sync with |
1335 | * restored. | 1292 | * what we believe it to be. This is a bit later than when it |
1293 | * should be, but nonetheless it's better than calling | ||
1294 | * cpufreq_driver->get() here which might re-enable interrupts... | ||
1336 | */ | 1295 | */ |
1337 | static int cpufreq_resume(struct sys_device *sysdev) | 1296 | static int cpufreq_resume(struct sys_device *sysdev) |
1338 | { | 1297 | { |
1339 | int ret = 0; | 1298 | int ret = 0; |
1340 | 1299 | ||
1341 | #ifdef __powerpc__ | ||
1342 | int cpu = sysdev->id; | 1300 | int cpu = sysdev->id; |
1343 | struct cpufreq_policy *cpu_policy; | 1301 | struct cpufreq_policy *cpu_policy; |
1344 | 1302 | ||
1345 | dprintk("resuming cpu %u\n", cpu); | 1303 | dprintk("resuming cpu %u\n", cpu); |
1346 | 1304 | ||
1347 | /* As with the ->suspend method, all the code below is | ||
1348 | * only necessary because Powerbooks suck. | ||
1349 | * See commit 42d4dc3f4e1e for jokes. */ | ||
1350 | |||
1351 | if (!cpu_online(cpu)) | 1305 | if (!cpu_online(cpu)) |
1352 | return 0; | 1306 | return 0; |
1353 | 1307 | ||
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev) | |||
1373 | } | 1327 | } |
1374 | } | 1328 | } |
1375 | 1329 | ||
1376 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | ||
1377 | unsigned int cur_freq = 0; | ||
1378 | |||
1379 | if (cpufreq_driver->get) | ||
1380 | cur_freq = cpufreq_driver->get(cpu_policy->cpu); | ||
1381 | |||
1382 | if (!cur_freq || !cpu_policy->cur) { | ||
1383 | printk(KERN_ERR "cpufreq: resume failed to assert " | ||
1384 | "current frequency is what timing core " | ||
1385 | "thinks it is.\n"); | ||
1386 | goto out; | ||
1387 | } | ||
1388 | |||
1389 | if (unlikely(cur_freq != cpu_policy->cur)) { | ||
1390 | struct cpufreq_freqs freqs; | ||
1391 | |||
1392 | if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN)) | ||
1393 | dprintk("Warning: CPU frequency " | ||
1394 | "is %u, cpufreq assumed %u kHz.\n", | ||
1395 | cur_freq, cpu_policy->cur); | ||
1396 | |||
1397 | freqs.cpu = cpu; | ||
1398 | freqs.old = cpu_policy->cur; | ||
1399 | freqs.new = cur_freq; | ||
1400 | |||
1401 | srcu_notifier_call_chain( | ||
1402 | &cpufreq_transition_notifier_list, | ||
1403 | CPUFREQ_RESUMECHANGE, &freqs); | ||
1404 | adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs); | ||
1405 | |||
1406 | cpu_policy->cur = cur_freq; | ||
1407 | } | ||
1408 | } | ||
1409 | |||
1410 | out: | ||
1411 | schedule_work(&cpu_policy->update); | 1330 | schedule_work(&cpu_policy->update); |
1331 | |||
1412 | fail: | 1332 | fail: |
1413 | cpufreq_cpu_put(cpu_policy); | 1333 | cpufreq_cpu_put(cpu_policy); |
1414 | #endif /* __powerpc__ */ | ||
1415 | return ret; | 1334 | return ret; |
1416 | } | 1335 | } |
1417 | 1336 | ||
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5b27692372bf..b08403d7d1ca 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -13,7 +13,6 @@ if CRYPTO_HW | |||
13 | config CRYPTO_DEV_PADLOCK | 13 | config CRYPTO_DEV_PADLOCK |
14 | tristate "Support for VIA PadLock ACE" | 14 | tristate "Support for VIA PadLock ACE" |
15 | depends on X86 && !UML | 15 | depends on X86 && !UML |
16 | select CRYPTO_ALGAPI | ||
17 | help | 16 | help |
18 | Some VIA processors come with an integrated crypto engine | 17 | Some VIA processors come with an integrated crypto engine |
19 | (so called VIA PadLock ACE, Advanced Cryptography Engine) | 18 | (so called VIA PadLock ACE, Advanced Cryptography Engine) |
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES | |||
39 | config CRYPTO_DEV_PADLOCK_SHA | 38 | config CRYPTO_DEV_PADLOCK_SHA |
40 | tristate "PadLock driver for SHA1 and SHA256 algorithms" | 39 | tristate "PadLock driver for SHA1 and SHA256 algorithms" |
41 | depends on CRYPTO_DEV_PADLOCK | 40 | depends on CRYPTO_DEV_PADLOCK |
41 | select CRYPTO_HASH | ||
42 | select CRYPTO_SHA1 | 42 | select CRYPTO_SHA1 |
43 | select CRYPTO_SHA256 | 43 | select CRYPTO_SHA256 |
44 | help | 44 | help |
@@ -157,6 +157,19 @@ config S390_PRNG | |||
157 | ANSI X9.17 standard. The PRNG is usable via the char device | 157 | ANSI X9.17 standard. The PRNG is usable via the char device |
158 | /dev/prandom. | 158 | /dev/prandom. |
159 | 159 | ||
160 | config CRYPTO_DEV_MV_CESA | ||
161 | tristate "Marvell's Cryptographic Engine" | ||
162 | depends on PLAT_ORION | ||
163 | select CRYPTO_ALGAPI | ||
164 | select CRYPTO_AES | ||
165 | select CRYPTO_BLKCIPHER2 | ||
166 | help | ||
167 | This driver allows you to utilize the Cryptographic Engines and | ||
168 | Security Accelerator (CESA) which can be found on the Marvell Orion | ||
169 | and Kirkwood SoCs, such as QNAP's TS-209. | ||
170 | |||
171 | Currently the driver supports AES in ECB and CBC mode without DMA. | ||
172 | |||
160 | config CRYPTO_DEV_HIFN_795X | 173 | config CRYPTO_DEV_HIFN_795X |
161 | tristate "Driver HIFN 795x crypto accelerator chips" | 174 | tristate "Driver HIFN 795x crypto accelerator chips" |
162 | select CRYPTO_DES | 175 | select CRYPTO_DES |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 9bf4a2bc8846..6ffcb3f7f942 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o | |||
2 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o | 2 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o |
3 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o | 3 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o |
4 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o | 4 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o |
5 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o | ||
5 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o | 6 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o |
6 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o | 7 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o |
7 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ | 8 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ |
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 61b6e1bec8c6..a33243c17b00 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c | |||
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, | |||
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); | 211 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
212 | sizeof(struct crypto4xx_ctx)); | ||
212 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 213 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; |
213 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, | 214 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, |
214 | SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, | 215 | SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 4c0dfb2b872e..46e899ac924e 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -31,8 +31,6 @@ | |||
31 | #include <asm/dcr.h> | 31 | #include <asm/dcr.h> |
32 | #include <asm/dcr-regs.h> | 32 | #include <asm/dcr-regs.h> |
33 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
34 | #include <crypto/internal/hash.h> | ||
35 | #include <crypto/algapi.h> | ||
36 | #include <crypto/aes.h> | 34 | #include <crypto/aes.h> |
37 | #include <crypto/sha.h> | 35 | #include <crypto/sha.h> |
38 | #include "crypto4xx_reg_def.h" | 36 | #include "crypto4xx_reg_def.h" |
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm) | |||
998 | ctx->sa_out_dma_addr = 0; | 996 | ctx->sa_out_dma_addr = 0; |
999 | ctx->sa_len = 0; | 997 | ctx->sa_len = 0; |
1000 | 998 | ||
1001 | if (alg->cra_type == &crypto_ablkcipher_type) | 999 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
1000 | default: | ||
1002 | tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); | 1001 | tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); |
1003 | else if (alg->cra_type == &crypto_ahash_type) | 1002 | break; |
1004 | tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); | 1003 | case CRYPTO_ALG_TYPE_AHASH: |
1004 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1005 | sizeof(struct crypto4xx_ctx)); | ||
1006 | break; | ||
1007 | } | ||
1005 | 1008 | ||
1006 | return 0; | 1009 | return 0; |
1007 | } | 1010 | } |
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm) | |||
1015 | } | 1018 | } |
1016 | 1019 | ||
1017 | int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | 1020 | int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, |
1018 | struct crypto_alg *crypto_alg, int array_size) | 1021 | struct crypto4xx_alg_common *crypto_alg, |
1022 | int array_size) | ||
1019 | { | 1023 | { |
1020 | struct crypto4xx_alg *alg; | 1024 | struct crypto4xx_alg *alg; |
1021 | int i; | 1025 | int i; |
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | |||
1027 | return -ENOMEM; | 1031 | return -ENOMEM; |
1028 | 1032 | ||
1029 | alg->alg = crypto_alg[i]; | 1033 | alg->alg = crypto_alg[i]; |
1030 | INIT_LIST_HEAD(&alg->alg.cra_list); | ||
1031 | if (alg->alg.cra_init == NULL) | ||
1032 | alg->alg.cra_init = crypto4xx_alg_init; | ||
1033 | if (alg->alg.cra_exit == NULL) | ||
1034 | alg->alg.cra_exit = crypto4xx_alg_exit; | ||
1035 | alg->dev = sec_dev; | 1034 | alg->dev = sec_dev; |
1036 | rc = crypto_register_alg(&alg->alg); | 1035 | |
1036 | switch (alg->alg.type) { | ||
1037 | case CRYPTO_ALG_TYPE_AHASH: | ||
1038 | rc = crypto_register_ahash(&alg->alg.u.hash); | ||
1039 | break; | ||
1040 | |||
1041 | default: | ||
1042 | rc = crypto_register_alg(&alg->alg.u.cipher); | ||
1043 | break; | ||
1044 | } | ||
1045 | |||
1037 | if (rc) { | 1046 | if (rc) { |
1038 | list_del(&alg->entry); | 1047 | list_del(&alg->entry); |
1039 | kfree(alg); | 1048 | kfree(alg); |
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) | |||
1051 | 1060 | ||
1052 | list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { | 1061 | list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { |
1053 | list_del(&alg->entry); | 1062 | list_del(&alg->entry); |
1054 | crypto_unregister_alg(&alg->alg); | 1063 | switch (alg->alg.type) { |
1064 | case CRYPTO_ALG_TYPE_AHASH: | ||
1065 | crypto_unregister_ahash(&alg->alg.u.hash); | ||
1066 | break; | ||
1067 | |||
1068 | default: | ||
1069 | crypto_unregister_alg(&alg->alg.u.cipher); | ||
1070 | } | ||
1055 | kfree(alg); | 1071 | kfree(alg); |
1056 | } | 1072 | } |
1057 | } | 1073 | } |
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) | |||
1104 | /** | 1120 | /** |
1105 | * Supported Crypto Algorithms | 1121 | * Supported Crypto Algorithms |
1106 | */ | 1122 | */ |
1107 | struct crypto_alg crypto4xx_alg[] = { | 1123 | struct crypto4xx_alg_common crypto4xx_alg[] = { |
1108 | /* Crypto AES modes */ | 1124 | /* Crypto AES modes */ |
1109 | { | 1125 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { |
1110 | .cra_name = "cbc(aes)", | 1126 | .cra_name = "cbc(aes)", |
1111 | .cra_driver_name = "cbc-aes-ppc4xx", | 1127 | .cra_driver_name = "cbc-aes-ppc4xx", |
1112 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | 1128 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, |
1113 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1129 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1114 | .cra_blocksize = AES_BLOCK_SIZE, | 1130 | .cra_blocksize = AES_BLOCK_SIZE, |
1115 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1131 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1116 | .cra_alignmask = 0, | ||
1117 | .cra_type = &crypto_ablkcipher_type, | 1132 | .cra_type = &crypto_ablkcipher_type, |
1133 | .cra_init = crypto4xx_alg_init, | ||
1134 | .cra_exit = crypto4xx_alg_exit, | ||
1118 | .cra_module = THIS_MODULE, | 1135 | .cra_module = THIS_MODULE, |
1119 | .cra_u = { | 1136 | .cra_u = { |
1120 | .ablkcipher = { | 1137 | .ablkcipher = { |
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = { | |||
1126 | .decrypt = crypto4xx_decrypt, | 1143 | .decrypt = crypto4xx_decrypt, |
1127 | } | 1144 | } |
1128 | } | 1145 | } |
1129 | }, | 1146 | }}, |
1130 | /* Hash SHA1 */ | ||
1131 | { | ||
1132 | .cra_name = "sha1", | ||
1133 | .cra_driver_name = "sha1-ppc4xx", | ||
1134 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1135 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
1136 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1137 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1138 | .cra_alignmask = 0, | ||
1139 | .cra_type = &crypto_ahash_type, | ||
1140 | .cra_init = crypto4xx_sha1_alg_init, | ||
1141 | .cra_module = THIS_MODULE, | ||
1142 | .cra_u = { | ||
1143 | .ahash = { | ||
1144 | .digestsize = SHA1_DIGEST_SIZE, | ||
1145 | .init = crypto4xx_hash_init, | ||
1146 | .update = crypto4xx_hash_update, | ||
1147 | .final = crypto4xx_hash_final, | ||
1148 | .digest = crypto4xx_hash_digest, | ||
1149 | } | ||
1150 | } | ||
1151 | }, | ||
1152 | }; | 1147 | }; |
1153 | 1148 | ||
1154 | /** | 1149 | /** |
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 1ef103449364..da9cbe3b9fc3 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h | |||
@@ -22,6 +22,8 @@ | |||
22 | #ifndef __CRYPTO4XX_CORE_H__ | 22 | #ifndef __CRYPTO4XX_CORE_H__ |
23 | #define __CRYPTO4XX_CORE_H__ | 23 | #define __CRYPTO4XX_CORE_H__ |
24 | 24 | ||
25 | #include <crypto/internal/hash.h> | ||
26 | |||
25 | #define PPC460SX_SDR0_SRST 0x201 | 27 | #define PPC460SX_SDR0_SRST 0x201 |
26 | #define PPC405EX_SDR0_SRST 0x200 | 28 | #define PPC405EX_SDR0_SRST 0x200 |
27 | #define PPC460EX_SDR0_SRST 0x201 | 29 | #define PPC460EX_SDR0_SRST 0x201 |
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx { | |||
138 | u16 sa_len; | 140 | u16 sa_len; |
139 | }; | 141 | }; |
140 | 142 | ||
143 | struct crypto4xx_alg_common { | ||
144 | u32 type; | ||
145 | union { | ||
146 | struct crypto_alg cipher; | ||
147 | struct ahash_alg hash; | ||
148 | } u; | ||
149 | }; | ||
150 | |||
141 | struct crypto4xx_alg { | 151 | struct crypto4xx_alg { |
142 | struct list_head entry; | 152 | struct list_head entry; |
143 | struct crypto_alg alg; | 153 | struct crypto4xx_alg_common alg; |
144 | struct crypto4xx_device *dev; | 154 | struct crypto4xx_device *dev; |
145 | }; | 155 | }; |
146 | 156 | ||
147 | #define crypto_alg_to_crypto4xx_alg(x) \ | 157 | static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg( |
148 | container_of(x, struct crypto4xx_alg, alg) | 158 | struct crypto_alg *x) |
159 | { | ||
160 | switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) { | ||
161 | case CRYPTO_ALG_TYPE_AHASH: | ||
162 | return container_of(__crypto_ahash_alg(x), | ||
163 | struct crypto4xx_alg, alg.u.hash); | ||
164 | } | ||
165 | |||
166 | return container_of(x, struct crypto4xx_alg, alg.u.cipher); | ||
167 | } | ||
149 | 168 | ||
150 | extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); | 169 | extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); |
151 | extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); | 170 | extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); |
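The crypto4xx rework introduces crypto4xx_alg_common, a type-tagged union, so SHA goes through the new ahash registration API while ciphers keep crypto_register_alg(). A generic sketch of the dispatch idea; the struct and function names below are illustrative rather than the driver's:

#include <crypto/internal/hash.h>

struct alg_common {
	u32 type;				/* CRYPTO_ALG_TYPE_* tag */
	union {
		struct crypto_alg cipher;
		struct ahash_alg hash;
	} u;
};

static int register_common(struct alg_common *a)
{
	switch (a->type) {
	case CRYPTO_ALG_TYPE_AHASH:
		return crypto_register_ahash(&a->u.hash);
	default:
		return crypto_register_alg(&a->u.cipher);
	}
}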
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c new file mode 100644 index 000000000000..b21ef635f352 --- /dev/null +++ b/drivers/crypto/mv_cesa.c | |||
@@ -0,0 +1,606 @@ | |||
1 | /* | ||
2 | * Support for Marvell's crypto engine which can be found on some Orion5X | ||
3 | * boards. | ||
4 | * | ||
5 | * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
6 | * License: GPLv2 | ||
7 | * | ||
8 | */ | ||
9 | #include <crypto/aes.h> | ||
10 | #include <crypto/algapi.h> | ||
11 | #include <linux/crypto.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/kthread.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/scatterlist.h> | ||
17 | |||
18 | #include "mv_cesa.h" | ||
19 | /* | ||
20 | * STM: | ||
21 | * /---------------------------------------\ | ||
22 | * | | request complete | ||
23 | * \./ | | ||
24 | * IDLE -> new request -> BUSY -> done -> DEQUEUE | ||
25 | * /°\ | | ||
26 | * | | more scatter entries | ||
27 | * \________________/ | ||
28 | */ | ||
29 | enum engine_status { | ||
30 | ENGINE_IDLE, | ||
31 | ENGINE_BUSY, | ||
32 | ENGINE_W_DEQUEUE, | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct req_progress - used for every crypt request | ||
37 | * @src_sg_it: sg iterator for src | ||
38 | * @dst_sg_it: sg iterator for dst | ||
39 | * @sg_src_left: bytes left in src to process (scatter list) | ||
40 | * @src_start: offset to add to src start position (scatter list) | ||
41 | * @crypt_len: length of current crypt process | ||
42 | * @sg_dst_left: bytes left in dst to process in this scatter list | ||
43 | * @dst_start: offset to add to dst start position (scatter list) | ||
44 | * @total_req_bytes: total number of bytes processed (request). | ||
45 | * | ||
46 | * sg helpers are used to iterate over the scatterlist. Since the size of the | ||
47 | * SRAM may be less than the scatter size, this struct is used to keep | ||
48 | * track of progress within the current scatterlist. | ||
49 | */ | ||
50 | struct req_progress { | ||
51 | struct sg_mapping_iter src_sg_it; | ||
52 | struct sg_mapping_iter dst_sg_it; | ||
53 | |||
54 | /* src mostly */ | ||
55 | int sg_src_left; | ||
56 | int src_start; | ||
57 | int crypt_len; | ||
58 | /* dst mostly */ | ||
59 | int sg_dst_left; | ||
60 | int dst_start; | ||
61 | int total_req_bytes; | ||
62 | }; | ||
63 | |||
64 | struct crypto_priv { | ||
65 | void __iomem *reg; | ||
66 | void __iomem *sram; | ||
67 | int irq; | ||
68 | struct task_struct *queue_th; | ||
69 | |||
70 | /* the lock protects queue and eng_st */ | ||
71 | spinlock_t lock; | ||
72 | struct crypto_queue queue; | ||
73 | enum engine_status eng_st; | ||
74 | struct ablkcipher_request *cur_req; | ||
75 | struct req_progress p; | ||
76 | int max_req_size; | ||
77 | int sram_size; | ||
78 | }; | ||
79 | |||
80 | static struct crypto_priv *cpg; | ||
81 | |||
82 | struct mv_ctx { | ||
83 | u8 aes_enc_key[AES_KEY_LEN]; | ||
84 | u32 aes_dec_key[8]; | ||
85 | int key_len; | ||
86 | u32 need_calc_aes_dkey; | ||
87 | }; | ||
88 | |||
89 | enum crypto_op { | ||
90 | COP_AES_ECB, | ||
91 | COP_AES_CBC, | ||
92 | }; | ||
93 | |||
94 | struct mv_req_ctx { | ||
95 | enum crypto_op op; | ||
96 | int decrypt; | ||
97 | }; | ||
98 | |||
99 | static void compute_aes_dec_key(struct mv_ctx *ctx) | ||
100 | { | ||
101 | struct crypto_aes_ctx gen_aes_key; | ||
102 | int key_pos; | ||
103 | |||
104 | if (!ctx->need_calc_aes_dkey) | ||
105 | return; | ||
106 | |||
107 | crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); | ||
108 | |||
109 | key_pos = ctx->key_len + 24; | ||
110 | memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); | ||
111 | switch (ctx->key_len) { | ||
112 | case AES_KEYSIZE_256: | ||
113 | key_pos -= 2; | ||
114 | /* fall */ | ||
115 | case AES_KEYSIZE_192: | ||
116 | key_pos -= 2; | ||
117 | memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], | ||
118 | 4 * 4); | ||
119 | break; | ||
120 | } | ||
121 | ctx->need_calc_aes_dkey = 0; | ||
122 | } | ||
123 | |||
124 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | ||
125 | unsigned int len) | ||
126 | { | ||
127 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
128 | struct mv_ctx *ctx = crypto_tfm_ctx(tfm); | ||
129 | |||
130 | switch (len) { | ||
131 | case AES_KEYSIZE_128: | ||
132 | case AES_KEYSIZE_192: | ||
133 | case AES_KEYSIZE_256: | ||
134 | break; | ||
135 | default: | ||
136 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | ctx->key_len = len; | ||
140 | ctx->need_calc_aes_dkey = 1; | ||
141 | |||
142 | memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static void setup_data_in(struct ablkcipher_request *req) | ||
147 | { | ||
148 | int ret; | ||
149 | void *buf; | ||
150 | |||
151 | if (!cpg->p.sg_src_left) { | ||
152 | ret = sg_miter_next(&cpg->p.src_sg_it); | ||
153 | BUG_ON(!ret); | ||
154 | cpg->p.sg_src_left = cpg->p.src_sg_it.length; | ||
155 | cpg->p.src_start = 0; | ||
156 | } | ||
157 | |||
158 | cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size); | ||
159 | |||
160 | buf = cpg->p.src_sg_it.addr; | ||
161 | buf += cpg->p.src_start; | ||
162 | |||
163 | memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len); | ||
164 | |||
165 | cpg->p.sg_src_left -= cpg->p.crypt_len; | ||
166 | cpg->p.src_start += cpg->p.crypt_len; | ||
167 | } | ||
168 | |||
169 | static void mv_process_current_q(int first_block) | ||
170 | { | ||
171 | struct ablkcipher_request *req = cpg->cur_req; | ||
172 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
173 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
174 | struct sec_accel_config op; | ||
175 | |||
176 | switch (req_ctx->op) { | ||
177 | case COP_AES_ECB: | ||
178 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; | ||
179 | break; | ||
180 | case COP_AES_CBC: | ||
181 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; | ||
182 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | | ||
183 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); | ||
184 | if (first_block) | ||
185 | memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); | ||
186 | break; | ||
187 | } | ||
188 | if (req_ctx->decrypt) { | ||
189 | op.config |= CFG_DIR_DEC; | ||
190 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, | ||
191 | AES_KEY_LEN); | ||
192 | } else { | ||
193 | op.config |= CFG_DIR_ENC; | ||
194 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, | ||
195 | AES_KEY_LEN); | ||
196 | } | ||
197 | |||
198 | switch (ctx->key_len) { | ||
199 | case AES_KEYSIZE_128: | ||
200 | op.config |= CFG_AES_LEN_128; | ||
201 | break; | ||
202 | case AES_KEYSIZE_192: | ||
203 | op.config |= CFG_AES_LEN_192; | ||
204 | break; | ||
205 | case AES_KEYSIZE_256: | ||
206 | op.config |= CFG_AES_LEN_256; | ||
207 | break; | ||
208 | } | ||
209 | op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | | ||
210 | ENC_P_DST(SRAM_DATA_OUT_START); | ||
211 | op.enc_key_p = SRAM_DATA_KEY_P; | ||
212 | |||
213 | setup_data_in(req); | ||
214 | op.enc_len = cpg->p.crypt_len; | ||
215 | memcpy(cpg->sram + SRAM_CONFIG, &op, | ||
216 | sizeof(struct sec_accel_config)); | ||
217 | |||
218 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
219 | /* GO */ | ||
220 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | ||
221 | |||
222 | /* | ||
223 | * XXX: add timer if the interrupt does not occur for some mystery | ||
224 | * reason | ||
225 | */ | ||
226 | } | ||
227 | |||
228 | static void mv_crypto_algo_completion(void) | ||
229 | { | ||
230 | struct ablkcipher_request *req = cpg->cur_req; | ||
231 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
232 | |||
233 | if (req_ctx->op != COP_AES_CBC) | ||
234 | return ; | ||
235 | |||
236 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | ||
237 | } | ||
238 | |||
239 | static void dequeue_complete_req(void) | ||
240 | { | ||
241 | struct ablkcipher_request *req = cpg->cur_req; | ||
242 | void *buf; | ||
243 | int ret; | ||
244 | |||
245 | cpg->p.total_req_bytes += cpg->p.crypt_len; | ||
246 | do { | ||
247 | int dst_copy; | ||
248 | |||
249 | if (!cpg->p.sg_dst_left) { | ||
250 | ret = sg_miter_next(&cpg->p.dst_sg_it); | ||
251 | BUG_ON(!ret); | ||
252 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | ||
253 | cpg->p.dst_start = 0; | ||
254 | } | ||
255 | |||
256 | buf = cpg->p.dst_sg_it.addr; | ||
257 | buf += cpg->p.dst_start; | ||
258 | |||
259 | dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left); | ||
260 | |||
261 | memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy); | ||
262 | |||
263 | cpg->p.sg_dst_left -= dst_copy; | ||
264 | cpg->p.crypt_len -= dst_copy; | ||
265 | cpg->p.dst_start += dst_copy; | ||
266 | } while (cpg->p.crypt_len > 0); | ||
267 | |||
268 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); | ||
269 | if (cpg->p.total_req_bytes < req->nbytes) { | ||
270 | /* process next scatter list entry */ | ||
271 | cpg->eng_st = ENGINE_BUSY; | ||
272 | mv_process_current_q(0); | ||
273 | } else { | ||
274 | sg_miter_stop(&cpg->p.src_sg_it); | ||
275 | sg_miter_stop(&cpg->p.dst_sg_it); | ||
276 | mv_crypto_algo_completion(); | ||
277 | cpg->eng_st = ENGINE_IDLE; | ||
278 | req->base.complete(&req->base, 0); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | ||
283 | { | ||
284 | int i = 0; | ||
285 | |||
286 | do { | ||
287 | total_bytes -= sl[i].length; | ||
288 | i++; | ||
289 | |||
290 | } while (total_bytes > 0); | ||
291 | |||
292 | return i; | ||
293 | } | ||
294 | |||
295 | static void mv_enqueue_new_req(struct ablkcipher_request *req) | ||
296 | { | ||
297 | int num_sgs; | ||
298 | |||
299 | cpg->cur_req = req; | ||
300 | memset(&cpg->p, 0, sizeof(struct req_progress)); | ||
301 | |||
302 | num_sgs = count_sgs(req->src, req->nbytes); | ||
303 | sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | ||
304 | |||
305 | num_sgs = count_sgs(req->dst, req->nbytes); | ||
306 | sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); | ||
307 | mv_process_current_q(1); | ||
308 | } | ||
309 | |||
310 | static int queue_manag(void *data) | ||
311 | { | ||
312 | cpg->eng_st = ENGINE_IDLE; | ||
313 | do { | ||
314 | struct ablkcipher_request *req; | ||
315 | struct crypto_async_request *async_req = NULL; | ||
316 | struct crypto_async_request *backlog; | ||
317 | |||
318 | __set_current_state(TASK_INTERRUPTIBLE); | ||
319 | |||
320 | if (cpg->eng_st == ENGINE_W_DEQUEUE) | ||
321 | dequeue_complete_req(); | ||
322 | |||
323 | spin_lock_irq(&cpg->lock); | ||
324 | if (cpg->eng_st == ENGINE_IDLE) { | ||
325 | backlog = crypto_get_backlog(&cpg->queue); | ||
326 | async_req = crypto_dequeue_request(&cpg->queue); | ||
327 | if (async_req) { | ||
328 | BUG_ON(cpg->eng_st != ENGINE_IDLE); | ||
329 | cpg->eng_st = ENGINE_BUSY; | ||
330 | } | ||
331 | } | ||
332 | spin_unlock_irq(&cpg->lock); | ||
333 | |||
334 | if (backlog) { | ||
335 | backlog->complete(backlog, -EINPROGRESS); | ||
336 | backlog = NULL; | ||
337 | } | ||
338 | |||
339 | if (async_req) { | ||
340 | req = container_of(async_req, | ||
341 | struct ablkcipher_request, base); | ||
342 | mv_enqueue_new_req(req); | ||
343 | async_req = NULL; | ||
344 | } | ||
345 | |||
346 | schedule(); | ||
347 | |||
348 | } while (!kthread_should_stop()); | ||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | static int mv_handle_req(struct ablkcipher_request *req) | ||
353 | { | ||
354 | unsigned long flags; | ||
355 | int ret; | ||
356 | |||
357 | spin_lock_irqsave(&cpg->lock, flags); | ||
358 | ret = ablkcipher_enqueue_request(&cpg->queue, req); | ||
359 | spin_unlock_irqrestore(&cpg->lock, flags); | ||
360 | wake_up_process(cpg->queue_th); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
364 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) | ||
365 | { | ||
366 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
367 | |||
368 | req_ctx->op = COP_AES_ECB; | ||
369 | req_ctx->decrypt = 0; | ||
370 | |||
371 | return mv_handle_req(req); | ||
372 | } | ||
373 | |||
374 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | ||
375 | { | ||
376 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
377 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
378 | |||
379 | req_ctx->op = COP_AES_ECB; | ||
380 | req_ctx->decrypt = 1; | ||
381 | |||
382 | compute_aes_dec_key(ctx); | ||
383 | return mv_handle_req(req); | ||
384 | } | ||
385 | |||
386 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | ||
387 | { | ||
388 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
389 | |||
390 | req_ctx->op = COP_AES_CBC; | ||
391 | req_ctx->decrypt = 0; | ||
392 | |||
393 | return mv_handle_req(req); | ||
394 | } | ||
395 | |||
396 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | ||
397 | { | ||
398 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
399 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
400 | |||
401 | req_ctx->op = COP_AES_CBC; | ||
402 | req_ctx->decrypt = 1; | ||
403 | |||
404 | compute_aes_dec_key(ctx); | ||
405 | return mv_handle_req(req); | ||
406 | } | ||
407 | |||
408 | static int mv_cra_init(struct crypto_tfm *tfm) | ||
409 | { | ||
410 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | irqreturn_t crypto_int(int irq, void *priv) | ||
415 | { | ||
416 | u32 val; | ||
417 | |||
418 | val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); | ||
419 | if (!(val & SEC_INT_ACCEL0_DONE)) | ||
420 | return IRQ_NONE; | ||
421 | |||
422 | val &= ~SEC_INT_ACCEL0_DONE; | ||
423 | writel(val, cpg->reg + FPGA_INT_STATUS); | ||
424 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | ||
425 | BUG_ON(cpg->eng_st != ENGINE_BUSY); | ||
426 | cpg->eng_st = ENGINE_W_DEQUEUE; | ||
427 | wake_up_process(cpg->queue_th); | ||
428 | return IRQ_HANDLED; | ||
429 | } | ||
430 | |||
431 | struct crypto_alg mv_aes_alg_ecb = { | ||
432 | .cra_name = "ecb(aes)", | ||
433 | .cra_driver_name = "mv-ecb-aes", | ||
434 | .cra_priority = 300, | ||
435 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
436 | .cra_blocksize = 16, | ||
437 | .cra_ctxsize = sizeof(struct mv_ctx), | ||
438 | .cra_alignmask = 0, | ||
439 | .cra_type = &crypto_ablkcipher_type, | ||
440 | .cra_module = THIS_MODULE, | ||
441 | .cra_init = mv_cra_init, | ||
442 | .cra_u = { | ||
443 | .ablkcipher = { | ||
444 | .min_keysize = AES_MIN_KEY_SIZE, | ||
445 | .max_keysize = AES_MAX_KEY_SIZE, | ||
446 | .setkey = mv_setkey_aes, | ||
447 | .encrypt = mv_enc_aes_ecb, | ||
448 | .decrypt = mv_dec_aes_ecb, | ||
449 | }, | ||
450 | }, | ||
451 | }; | ||
452 | |||
453 | struct crypto_alg mv_aes_alg_cbc = { | ||
454 | .cra_name = "cbc(aes)", | ||
455 | .cra_driver_name = "mv-cbc-aes", | ||
456 | .cra_priority = 300, | ||
457 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
458 | .cra_blocksize = AES_BLOCK_SIZE, | ||
459 | .cra_ctxsize = sizeof(struct mv_ctx), | ||
460 | .cra_alignmask = 0, | ||
461 | .cra_type = &crypto_ablkcipher_type, | ||
462 | .cra_module = THIS_MODULE, | ||
463 | .cra_init = mv_cra_init, | ||
464 | .cra_u = { | ||
465 | .ablkcipher = { | ||
466 | .ivsize = AES_BLOCK_SIZE, | ||
467 | .min_keysize = AES_MIN_KEY_SIZE, | ||
468 | .max_keysize = AES_MAX_KEY_SIZE, | ||
469 | .setkey = mv_setkey_aes, | ||
470 | .encrypt = mv_enc_aes_cbc, | ||
471 | .decrypt = mv_dec_aes_cbc, | ||
472 | }, | ||
473 | }, | ||
474 | }; | ||
475 | |||
476 | static int mv_probe(struct platform_device *pdev) | ||
477 | { | ||
478 | struct crypto_priv *cp; | ||
479 | struct resource *res; | ||
480 | int irq; | ||
481 | int ret; | ||
482 | |||
483 | if (cpg) { | ||
484 | printk(KERN_ERR "Second crypto dev?\n"); | ||
485 | return -EEXIST; | ||
486 | } | ||
487 | |||
488 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
489 | if (!res) | ||
490 | return -ENXIO; | ||
491 | |||
492 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
493 | if (!cp) | ||
494 | return -ENOMEM; | ||
495 | |||
496 | spin_lock_init(&cp->lock); | ||
497 | crypto_init_queue(&cp->queue, 50); | ||
498 | cp->reg = ioremap(res->start, res->end - res->start + 1); | ||
499 | if (!cp->reg) { | ||
500 | ret = -ENOMEM; | ||
501 | goto err; | ||
502 | } | ||
503 | |||
504 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); | ||
505 | if (!res) { | ||
506 | ret = -ENXIO; | ||
507 | goto err_unmap_reg; | ||
508 | } | ||
509 | cp->sram_size = res->end - res->start + 1; | ||
510 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | ||
511 | cp->sram = ioremap(res->start, cp->sram_size); | ||
512 | if (!cp->sram) { | ||
513 | ret = -ENOMEM; | ||
514 | goto err_unmap_reg; | ||
515 | } | ||
516 | |||
517 | irq = platform_get_irq(pdev, 0); | ||
518 | if (irq < 0 || irq == NO_IRQ) { | ||
519 | ret = irq; | ||
520 | goto err_unmap_sram; | ||
521 | } | ||
522 | cp->irq = irq; | ||
523 | |||
524 | platform_set_drvdata(pdev, cp); | ||
525 | cpg = cp; | ||
526 | |||
527 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | ||
528 | if (IS_ERR(cp->queue_th)) { | ||
529 | ret = PTR_ERR(cp->queue_th); | ||
530 | 		goto err_unmap_sram; | ||
531 | } | ||
532 | |||
533 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | ||
534 | cp); | ||
535 | if (ret) | ||
536 | 		goto err_thread; | ||
537 | |||
538 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | ||
539 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | ||
540 | |||
541 | ret = crypto_register_alg(&mv_aes_alg_ecb); | ||
542 | if (ret) | ||
543 | 		goto err_irq; | ||
544 | |||
545 | ret = crypto_register_alg(&mv_aes_alg_cbc); | ||
546 | if (ret) | ||
547 | goto err_unreg_ecb; | ||
548 | return 0; | ||
549 | err_unreg_ecb: | ||
550 | 	crypto_unregister_alg(&mv_aes_alg_ecb); | ||
551 | err_irq: | ||
552 | 	free_irq(irq, cp); | ||
553 | err_thread: | ||
554 | 	kthread_stop(cp->queue_th); | ||
555 | err_unmap_sram: | ||
556 | iounmap(cp->sram); | ||
557 | err_unmap_reg: | ||
558 | iounmap(cp->reg); | ||
559 | err: | ||
560 | kfree(cp); | ||
561 | cpg = NULL; | ||
562 | platform_set_drvdata(pdev, NULL); | ||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int mv_remove(struct platform_device *pdev) | ||
567 | { | ||
568 | struct crypto_priv *cp = platform_get_drvdata(pdev); | ||
569 | |||
570 | crypto_unregister_alg(&mv_aes_alg_ecb); | ||
571 | crypto_unregister_alg(&mv_aes_alg_cbc); | ||
572 | kthread_stop(cp->queue_th); | ||
573 | free_irq(cp->irq, cp); | ||
574 | memset(cp->sram, 0, cp->sram_size); | ||
575 | iounmap(cp->sram); | ||
576 | iounmap(cp->reg); | ||
577 | kfree(cp); | ||
578 | cpg = NULL; | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | static struct platform_driver marvell_crypto = { | ||
583 | .probe = mv_probe, | ||
584 | .remove = mv_remove, | ||
585 | .driver = { | ||
586 | .owner = THIS_MODULE, | ||
587 | .name = "mv_crypto", | ||
588 | }, | ||
589 | }; | ||
590 | MODULE_ALIAS("platform:mv_crypto"); | ||
591 | |||
592 | static int __init mv_crypto_init(void) | ||
593 | { | ||
594 | return platform_driver_register(&marvell_crypto); | ||
595 | } | ||
596 | module_init(mv_crypto_init); | ||
597 | |||
598 | static void __exit mv_crypto_exit(void) | ||
599 | { | ||
600 | platform_driver_unregister(&marvell_crypto); | ||
601 | } | ||
602 | module_exit(mv_crypto_exit); | ||
603 | |||
604 | MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); | ||
605 | MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); | ||
606 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h new file mode 100644 index 000000000000..c3e25d3bb171 --- /dev/null +++ b/drivers/crypto/mv_cesa.h | |||
@@ -0,0 +1,119 @@ | |||
1 | #ifndef __MV_CRYPTO_H__ | ||
2 | |||
3 | #define DIGEST_INITIAL_VAL_A 0xdd00 | ||
4 | #define DES_CMD_REG 0xdd58 | ||
5 | |||
6 | #define SEC_ACCEL_CMD 0xde00 | ||
7 | #define SEC_CMD_EN_SEC_ACCL0 (1 << 0) | ||
8 | #define SEC_CMD_EN_SEC_ACCL1 (1 << 1) | ||
9 | #define SEC_CMD_DISABLE_SEC (1 << 2) | ||
10 | |||
11 | #define SEC_ACCEL_DESC_P0 0xde04 | ||
12 | #define SEC_DESC_P0_PTR(x) (x) | ||
13 | |||
14 | #define SEC_ACCEL_DESC_P1 0xde14 | ||
15 | #define SEC_DESC_P1_PTR(x) (x) | ||
16 | |||
17 | #define SEC_ACCEL_CFG 0xde08 | ||
18 | #define SEC_CFG_STOP_DIG_ERR (1 << 0) | ||
19 | #define SEC_CFG_CH0_W_IDMA (1 << 7) | ||
20 | #define SEC_CFG_CH1_W_IDMA (1 << 8) | ||
21 | #define SEC_CFG_ACT_CH0_IDMA (1 << 9) | ||
22 | #define SEC_CFG_ACT_CH1_IDMA (1 << 10) | ||
23 | |||
24 | #define SEC_ACCEL_STATUS 0xde0c | ||
25 | #define SEC_ST_ACT_0 (1 << 0) | ||
26 | #define SEC_ST_ACT_1 (1 << 1) | ||
27 | |||
28 | /* | ||
29 | * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in Errata | ||
30 | * 4.12. It looks like it was part of an IRQ controller in the FPGA and | ||
31 | * someone forgot to remove it while switching to the core and moving to | ||
32 | * SEC_ACCEL_INT_STATUS. | ||
33 | */ | ||
34 | #define FPGA_INT_STATUS 0xdd68 | ||
35 | #define SEC_ACCEL_INT_STATUS 0xde20 | ||
36 | #define SEC_INT_AUTH_DONE (1 << 0) | ||
37 | #define SEC_INT_DES_E_DONE (1 << 1) | ||
38 | #define SEC_INT_AES_E_DONE (1 << 2) | ||
39 | #define SEC_INT_AES_D_DONE (1 << 3) | ||
40 | #define SEC_INT_ENC_DONE (1 << 4) | ||
41 | #define SEC_INT_ACCEL0_DONE (1 << 5) | ||
42 | #define SEC_INT_ACCEL1_DONE (1 << 6) | ||
43 | #define SEC_INT_ACC0_IDMA_DONE (1 << 7) | ||
44 | #define SEC_INT_ACC1_IDMA_DONE (1 << 8) | ||
45 | |||
46 | #define SEC_ACCEL_INT_MASK 0xde24 | ||
47 | |||
48 | #define AES_KEY_LEN (8 * 4) | ||
49 | |||
50 | struct sec_accel_config { | ||
51 | |||
52 | u32 config; | ||
53 | #define CFG_OP_MAC_ONLY 0 | ||
54 | #define CFG_OP_CRYPT_ONLY 1 | ||
55 | #define CFG_OP_MAC_CRYPT 2 | ||
56 | #define CFG_OP_CRYPT_MAC 3 | ||
57 | #define CFG_MACM_MD5 (4 << 4) | ||
58 | #define CFG_MACM_SHA1 (5 << 4) | ||
59 | #define CFG_MACM_HMAC_MD5 (6 << 4) | ||
60 | #define CFG_MACM_HMAC_SHA1 (7 << 4) | ||
61 | #define CFG_ENCM_DES (1 << 8) | ||
62 | #define CFG_ENCM_3DES (2 << 8) | ||
63 | #define CFG_ENCM_AES (3 << 8) | ||
64 | #define CFG_DIR_ENC (0 << 12) | ||
65 | #define CFG_DIR_DEC (1 << 12) | ||
66 | #define CFG_ENC_MODE_ECB (0 << 16) | ||
67 | #define CFG_ENC_MODE_CBC (1 << 16) | ||
68 | #define CFG_3DES_EEE (0 << 20) | ||
69 | #define CFG_3DES_EDE (1 << 20) | ||
70 | #define CFG_AES_LEN_128 (0 << 24) | ||
71 | #define CFG_AES_LEN_192 (1 << 24) | ||
72 | #define CFG_AES_LEN_256 (2 << 24) | ||
73 | |||
74 | u32 enc_p; | ||
75 | #define ENC_P_SRC(x) (x) | ||
76 | #define ENC_P_DST(x) ((x) << 16) | ||
77 | |||
78 | u32 enc_len; | ||
79 | #define ENC_LEN(x) (x) | ||
80 | |||
81 | u32 enc_key_p; | ||
82 | #define ENC_KEY_P(x) (x) | ||
83 | |||
84 | u32 enc_iv; | ||
85 | #define ENC_IV_POINT(x) ((x) << 0) | ||
86 | #define ENC_IV_BUF_POINT(x) ((x) << 16) | ||
87 | |||
88 | u32 mac_src_p; | ||
89 | #define MAC_SRC_DATA_P(x) (x) | ||
90 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) | ||
91 | |||
92 | u32 mac_digest; | ||
93 | u32 mac_iv; | ||
94 | } __attribute__ ((packed)); | ||
95 | /* | ||
96 | * /-----------\ 0 | ||
97 | * | ACCEL CFG | 4 * 8 | ||
98 | * |-----------| 0x20 | ||
99 | * | CRYPT KEY | 8 * 4 | ||
100 | * |-----------| 0x40 | ||
101 | * | IV IN | 4 * 4 | ||
102 | * |-----------| 0x40 (inplace) | ||
103 | * | IV BUF | 4 * 4 | ||
104 | * |-----------| 0x50 | ||
105 | * | DATA IN | 16 * x (max ->max_req_size) | ||
106 | * |-----------| 0x50 (inplace operation) | ||
107 | * | DATA OUT | 16 * x (max ->max_req_size) | ||
108 | * \-----------/ SRAM size | ||
109 | */ | ||
110 | #define SRAM_CONFIG 0x00 | ||
111 | #define SRAM_DATA_KEY_P 0x20 | ||
112 | #define SRAM_DATA_IV 0x40 | ||
113 | #define SRAM_DATA_IV_BUF 0x40 | ||
114 | #define SRAM_DATA_IN_START 0x50 | ||
115 | #define SRAM_DATA_OUT_START 0x50 | ||
116 | |||
117 | #define SRAM_CFG_SPACE 0x50 | ||
118 | |||
119 | #endif | ||
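What follows is a small self-contained sketch of how the CFG_* fields above would be combined into one accelerator config word for, say, a CBC AES-128 decrypt, and of how the SRAM_* offsets line up with the layout comment. It is illustration only: the few field values are re-#defined locally so the program builds on its own, and the assumption that the hardware expects the fields simply OR'd into sec_accel_config.config follows from the structure above, not from a datasheet.

#include <stdio.h>

#define CFG_OP_CRYPT_ONLY	1
#define CFG_ENCM_AES		(3 << 8)
#define CFG_DIR_DEC		(1 << 12)
#define CFG_ENC_MODE_CBC	(1 << 16)
#define CFG_AES_LEN_128		(0 << 24)

int main(void)
{
	unsigned int cfg = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_DIR_DEC |
			   CFG_ENC_MODE_CBC | CFG_AES_LEN_128;

	printf("config word: 0x%08x\n", cfg);

	/* offsets from the layout diagram: 8 config words, then the key,
	 * then the IV, data from 0x50 onwards */
	printf("key at 0x%02x, IV at 0x%02x, data at 0x%02x\n",
	       8 * 4, 8 * 4 + 8 * 4, 8 * 4 + 8 * 4 + 4 * 4);
	return 0;
}

The printed offsets (0x20, 0x40, 0x50) match SRAM_DATA_KEY_P, SRAM_DATA_IV and SRAM_DATA_IN_START above, which is also why SRAM_CFG_SPACE is 0x50.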
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index a2c8e8514b63..76cb6b345e7b 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c | |||
@@ -12,81 +12,43 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/algapi.h> | 15 | #include <crypto/internal/hash.h> |
16 | #include <crypto/sha.h> | 16 | #include <crypto/sha.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/cryptohash.h> | ||
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
24 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
25 | #include <asm/i387.h> | 24 | #include <asm/i387.h> |
26 | #include "padlock.h" | 25 | #include "padlock.h" |
27 | 26 | ||
28 | #define SHA1_DEFAULT_FALLBACK "sha1-generic" | 27 | struct padlock_sha_desc { |
29 | #define SHA256_DEFAULT_FALLBACK "sha256-generic" | 28 | struct shash_desc fallback; |
29 | }; | ||
30 | 30 | ||
31 | struct padlock_sha_ctx { | 31 | struct padlock_sha_ctx { |
32 | char *data; | 32 | struct crypto_shash *fallback; |
33 | size_t used; | ||
34 | int bypass; | ||
35 | void (*f_sha_padlock)(const char *in, char *out, int count); | ||
36 | struct hash_desc fallback; | ||
37 | }; | 33 | }; |
38 | 34 | ||
39 | static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) | 35 | static int padlock_sha_init(struct shash_desc *desc) |
40 | { | ||
41 | return crypto_tfm_ctx(tfm); | ||
42 | } | ||
43 | |||
44 | /* We'll need aligned address on the stack */ | ||
45 | #define NEAREST_ALIGNED(ptr) \ | ||
46 | ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) | ||
47 | |||
48 | static struct crypto_alg sha1_alg, sha256_alg; | ||
49 | |||
50 | static void padlock_sha_bypass(struct crypto_tfm *tfm) | ||
51 | { | 36 | { |
52 | if (ctx(tfm)->bypass) | 37 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
53 | return; | 38 | struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); |
54 | 39 | ||
55 | crypto_hash_init(&ctx(tfm)->fallback); | 40 | dctx->fallback.tfm = ctx->fallback; |
56 | if (ctx(tfm)->data && ctx(tfm)->used) { | 41 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
57 | struct scatterlist sg; | 42 | return crypto_shash_init(&dctx->fallback); |
58 | |||
59 | sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used); | ||
60 | crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); | ||
61 | } | ||
62 | |||
63 | ctx(tfm)->used = 0; | ||
64 | ctx(tfm)->bypass = 1; | ||
65 | } | ||
66 | |||
67 | static void padlock_sha_init(struct crypto_tfm *tfm) | ||
68 | { | ||
69 | ctx(tfm)->used = 0; | ||
70 | ctx(tfm)->bypass = 0; | ||
71 | } | 43 | } |
72 | 44 | ||
73 | static void padlock_sha_update(struct crypto_tfm *tfm, | 45 | static int padlock_sha_update(struct shash_desc *desc, |
74 | const uint8_t *data, unsigned int length) | 46 | const u8 *data, unsigned int length) |
75 | { | 47 | { |
76 | /* Our buffer is always one page. */ | 48 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
77 | if (unlikely(!ctx(tfm)->bypass && | ||
78 | (ctx(tfm)->used + length > PAGE_SIZE))) | ||
79 | padlock_sha_bypass(tfm); | ||
80 | |||
81 | if (unlikely(ctx(tfm)->bypass)) { | ||
82 | struct scatterlist sg; | ||
83 | sg_init_one(&sg, (uint8_t *)data, length); | ||
84 | crypto_hash_update(&ctx(tfm)->fallback, &sg, length); | ||
85 | return; | ||
86 | } | ||
87 | 49 | ||
88 | memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); | 50 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
89 | ctx(tfm)->used += length; | 51 | return crypto_shash_update(&dctx->fallback, data, length); |
90 | } | 52 | } |
91 | 53 | ||
92 | static inline void padlock_output_block(uint32_t *src, | 54 | static inline void padlock_output_block(uint32_t *src, |
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src, | |||
96 | *dst++ = swab32(*src++); | 58 | *dst++ = swab32(*src++); |
97 | } | 59 | } |
98 | 60 | ||
99 | static void padlock_do_sha1(const char *in, char *out, int count) | 61 | static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, |
62 | unsigned int count, u8 *out) | ||
100 | { | 63 | { |
101 | /* We can't store directly to *out as it may be unaligned. */ | 64 | /* We can't store directly to *out as it may be unaligned. */ |
102 | /* BTW Don't reduce the buffer size below 128 Bytes! | 65 | /* BTW Don't reduce the buffer size below 128 Bytes! |
103 | * PadLock microcode needs it that big. */ | 66 | * PadLock microcode needs it that big. */ |
104 | char buf[128+16]; | 67 | char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); |
105 | char *result = NEAREST_ALIGNED(buf); | 68 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
69 | struct sha1_state state; | ||
70 | unsigned int space; | ||
71 | unsigned int leftover; | ||
106 | int ts_state; | 72 | int ts_state; |
73 | int err; | ||
74 | |||
75 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
76 | err = crypto_shash_export(&dctx->fallback, &state); | ||
77 | if (err) | ||
78 | goto out; | ||
79 | |||
80 | if (state.count + count > ULONG_MAX) | ||
81 | return crypto_shash_finup(&dctx->fallback, in, count, out); | ||
82 | |||
83 | leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1; | ||
84 | space = SHA1_BLOCK_SIZE - leftover; | ||
85 | if (space) { | ||
86 | if (count > space) { | ||
87 | err = crypto_shash_update(&dctx->fallback, in, space) ?: | ||
88 | crypto_shash_export(&dctx->fallback, &state); | ||
89 | if (err) | ||
90 | goto out; | ||
91 | count -= space; | ||
92 | in += space; | ||
93 | } else { | ||
94 | memcpy(state.buffer + leftover, in, count); | ||
95 | in = state.buffer; | ||
96 | count += leftover; | ||
97 | state.count &= ~(SHA1_BLOCK_SIZE - 1); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | memcpy(result, &state.state, SHA1_DIGEST_SIZE); | ||
107 | 102 | ||
108 | ((uint32_t *)result)[0] = SHA1_H0; | ||
109 | ((uint32_t *)result)[1] = SHA1_H1; | ||
110 | ((uint32_t *)result)[2] = SHA1_H2; | ||
111 | ((uint32_t *)result)[3] = SHA1_H3; | ||
112 | ((uint32_t *)result)[4] = SHA1_H4; | ||
113 | |||
114 | /* prevent taking the spurious DNA fault with padlock. */ | 103 | /* prevent taking the spurious DNA fault with padlock. */ |
115 | ts_state = irq_ts_save(); | 104 | ts_state = irq_ts_save(); |
116 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ | 105 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ |
117 | : "+S"(in), "+D"(result) | 106 | : \ |
118 | : "c"(count), "a"(0)); | 107 | : "c"((unsigned long)state.count + count), \ |
108 | "a"((unsigned long)state.count), \ | ||
109 | "S"(in), "D"(result)); | ||
119 | irq_ts_restore(ts_state); | 110 | irq_ts_restore(ts_state); |
120 | 111 | ||
121 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); | 112 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); |
113 | |||
114 | out: | ||
115 | return err; | ||
122 | } | 116 | } |
123 | 117 | ||
124 | static void padlock_do_sha256(const char *in, char *out, int count) | 118 | static int padlock_sha1_final(struct shash_desc *desc, u8 *out) |
119 | { | ||
120 | u8 buf[4]; | ||
121 | |||
122 | return padlock_sha1_finup(desc, buf, 0, out); | ||
123 | } | ||
124 | |||
125 | static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, | ||
126 | unsigned int count, u8 *out) | ||
125 | { | 127 | { |
126 | /* We can't store directly to *out as it may be unaligned. */ | 128 | /* We can't store directly to *out as it may be unaligned. */ |
127 | /* BTW Don't reduce the buffer size below 128 Bytes! | 129 | /* BTW Don't reduce the buffer size below 128 Bytes! |
128 | * PadLock microcode needs it that big. */ | 130 | * PadLock microcode needs it that big. */ |
129 | char buf[128+16]; | 131 | char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); |
130 | char *result = NEAREST_ALIGNED(buf); | 132 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
133 | struct sha256_state state; | ||
134 | unsigned int space; | ||
135 | unsigned int leftover; | ||
131 | int ts_state; | 136 | int ts_state; |
137 | int err; | ||
138 | |||
139 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
140 | err = crypto_shash_export(&dctx->fallback, &state); | ||
141 | if (err) | ||
142 | goto out; | ||
143 | |||
144 | if (state.count + count > ULONG_MAX) | ||
145 | return crypto_shash_finup(&dctx->fallback, in, count, out); | ||
146 | |||
147 | leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1; | ||
148 | space = SHA256_BLOCK_SIZE - leftover; | ||
149 | if (space) { | ||
150 | if (count > space) { | ||
151 | err = crypto_shash_update(&dctx->fallback, in, space) ?: | ||
152 | crypto_shash_export(&dctx->fallback, &state); | ||
153 | if (err) | ||
154 | goto out; | ||
155 | count -= space; | ||
156 | in += space; | ||
157 | } else { | ||
158 | memcpy(state.buf + leftover, in, count); | ||
159 | in = state.buf; | ||
160 | count += leftover; | ||
161 | 			state.count &= ~(SHA256_BLOCK_SIZE - 1); | ||
162 | } | ||
163 | } | ||
132 | 164 | ||
133 | ((uint32_t *)result)[0] = SHA256_H0; | 165 | memcpy(result, &state.state, SHA256_DIGEST_SIZE); |
134 | ((uint32_t *)result)[1] = SHA256_H1; | ||
135 | ((uint32_t *)result)[2] = SHA256_H2; | ||
136 | ((uint32_t *)result)[3] = SHA256_H3; | ||
137 | ((uint32_t *)result)[4] = SHA256_H4; | ||
138 | ((uint32_t *)result)[5] = SHA256_H5; | ||
139 | ((uint32_t *)result)[6] = SHA256_H6; | ||
140 | ((uint32_t *)result)[7] = SHA256_H7; | ||
141 | 166 | ||
142 | /* prevent taking the spurious DNA fault with padlock. */ | 167 | /* prevent taking the spurious DNA fault with padlock. */ |
143 | ts_state = irq_ts_save(); | 168 | ts_state = irq_ts_save(); |
144 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ | 169 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ |
145 | : "+S"(in), "+D"(result) | 170 | : \ |
146 | : "c"(count), "a"(0)); | 171 | : "c"((unsigned long)state.count + count), \ |
172 | "a"((unsigned long)state.count), \ | ||
173 | "S"(in), "D"(result)); | ||
147 | irq_ts_restore(ts_state); | 174 | irq_ts_restore(ts_state); |
148 | 175 | ||
149 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); | 176 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); |
177 | |||
178 | out: | ||
179 | return err; | ||
150 | } | 180 | } |
151 | 181 | ||
152 | static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) | 182 | static int padlock_sha256_final(struct shash_desc *desc, u8 *out) |
153 | { | 183 | { |
154 | if (unlikely(ctx(tfm)->bypass)) { | 184 | u8 buf[4]; |
155 | crypto_hash_final(&ctx(tfm)->fallback, out); | ||
156 | ctx(tfm)->bypass = 0; | ||
157 | return; | ||
158 | } | ||
159 | 185 | ||
160 | /* Pass the input buffer to PadLock microcode... */ | 186 | return padlock_sha256_finup(desc, buf, 0, out); |
161 | ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); | ||
162 | |||
163 | ctx(tfm)->used = 0; | ||
164 | } | 187 | } |
165 | 188 | ||
166 | static int padlock_cra_init(struct crypto_tfm *tfm) | 189 | static int padlock_cra_init(struct crypto_tfm *tfm) |
167 | { | 190 | { |
191 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | ||
168 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | 192 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; |
169 | struct crypto_hash *fallback_tfm; | 193 | struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); |
170 | 194 | struct crypto_shash *fallback_tfm; | |
171 | /* For now we'll allocate one page. This | 195 | int err = -ENOMEM; |
172 | * could eventually be configurable one day. */ | ||
173 | ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); | ||
174 | if (!ctx(tfm)->data) | ||
175 | return -ENOMEM; | ||
176 | 196 | ||
177 | /* Allocate a fallback and abort if it failed. */ | 197 | /* Allocate a fallback and abort if it failed. */ |
178 | fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, | 198 | fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, |
179 | CRYPTO_ALG_ASYNC | | 199 | CRYPTO_ALG_NEED_FALLBACK); |
180 | CRYPTO_ALG_NEED_FALLBACK); | ||
181 | if (IS_ERR(fallback_tfm)) { | 200 | if (IS_ERR(fallback_tfm)) { |
182 | printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", | 201 | printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", |
183 | fallback_driver_name); | 202 | fallback_driver_name); |
184 | free_page((unsigned long)(ctx(tfm)->data)); | 203 | err = PTR_ERR(fallback_tfm); |
185 | return PTR_ERR(fallback_tfm); | 204 | goto out; |
186 | } | 205 | } |
187 | 206 | ||
188 | ctx(tfm)->fallback.tfm = fallback_tfm; | 207 | ctx->fallback = fallback_tfm; |
208 | hash->descsize += crypto_shash_descsize(fallback_tfm); | ||
189 | return 0; | 209 | return 0; |
190 | } | ||
191 | |||
192 | static int padlock_sha1_cra_init(struct crypto_tfm *tfm) | ||
193 | { | ||
194 | ctx(tfm)->f_sha_padlock = padlock_do_sha1; | ||
195 | 210 | ||
196 | return padlock_cra_init(tfm); | 211 | out: |
197 | } | 212 | return err; |
198 | |||
199 | static int padlock_sha256_cra_init(struct crypto_tfm *tfm) | ||
200 | { | ||
201 | ctx(tfm)->f_sha_padlock = padlock_do_sha256; | ||
202 | |||
203 | return padlock_cra_init(tfm); | ||
204 | } | 213 | } |
205 | 214 | ||
206 | static void padlock_cra_exit(struct crypto_tfm *tfm) | 215 | static void padlock_cra_exit(struct crypto_tfm *tfm) |
207 | { | 216 | { |
208 | if (ctx(tfm)->data) { | 217 | struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); |
209 | free_page((unsigned long)(ctx(tfm)->data)); | ||
210 | ctx(tfm)->data = NULL; | ||
211 | } | ||
212 | 218 | ||
213 | crypto_free_hash(ctx(tfm)->fallback.tfm); | 219 | crypto_free_shash(ctx->fallback); |
214 | ctx(tfm)->fallback.tfm = NULL; | ||
215 | } | 220 | } |
216 | 221 | ||
217 | static struct crypto_alg sha1_alg = { | 222 | static struct shash_alg sha1_alg = { |
218 | .cra_name = "sha1", | 223 | .digestsize = SHA1_DIGEST_SIZE, |
219 | .cra_driver_name = "sha1-padlock", | 224 | .init = padlock_sha_init, |
220 | .cra_priority = PADLOCK_CRA_PRIORITY, | 225 | .update = padlock_sha_update, |
221 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | | 226 | .finup = padlock_sha1_finup, |
222 | CRYPTO_ALG_NEED_FALLBACK, | 227 | .final = padlock_sha1_final, |
223 | .cra_blocksize = SHA1_BLOCK_SIZE, | 228 | .descsize = sizeof(struct padlock_sha_desc), |
224 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), | 229 | .base = { |
225 | .cra_module = THIS_MODULE, | 230 | .cra_name = "sha1", |
226 | .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), | 231 | .cra_driver_name = "sha1-padlock", |
227 | .cra_init = padlock_sha1_cra_init, | 232 | .cra_priority = PADLOCK_CRA_PRIORITY, |
228 | .cra_exit = padlock_cra_exit, | 233 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | |
229 | .cra_u = { | 234 | CRYPTO_ALG_NEED_FALLBACK, |
230 | .digest = { | 235 | .cra_blocksize = SHA1_BLOCK_SIZE, |
231 | .dia_digestsize = SHA1_DIGEST_SIZE, | 236 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), |
232 | .dia_init = padlock_sha_init, | 237 | .cra_module = THIS_MODULE, |
233 | .dia_update = padlock_sha_update, | 238 | .cra_init = padlock_cra_init, |
234 | .dia_final = padlock_sha_final, | 239 | .cra_exit = padlock_cra_exit, |
235 | } | ||
236 | } | 240 | } |
237 | }; | 241 | }; |
238 | 242 | ||
239 | static struct crypto_alg sha256_alg = { | 243 | static struct shash_alg sha256_alg = { |
240 | .cra_name = "sha256", | 244 | .digestsize = SHA256_DIGEST_SIZE, |
241 | .cra_driver_name = "sha256-padlock", | 245 | .init = padlock_sha_init, |
242 | .cra_priority = PADLOCK_CRA_PRIORITY, | 246 | .update = padlock_sha_update, |
243 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | | 247 | .finup = padlock_sha256_finup, |
244 | CRYPTO_ALG_NEED_FALLBACK, | 248 | .final = padlock_sha256_final, |
245 | .cra_blocksize = SHA256_BLOCK_SIZE, | 249 | .descsize = sizeof(struct padlock_sha_desc), |
246 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), | 250 | .base = { |
247 | .cra_module = THIS_MODULE, | 251 | .cra_name = "sha256", |
248 | .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), | 252 | .cra_driver_name = "sha256-padlock", |
249 | .cra_init = padlock_sha256_cra_init, | 253 | .cra_priority = PADLOCK_CRA_PRIORITY, |
250 | .cra_exit = padlock_cra_exit, | 254 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | |
251 | .cra_u = { | 255 | CRYPTO_ALG_NEED_FALLBACK, |
252 | .digest = { | 256 | .cra_blocksize = SHA256_BLOCK_SIZE, |
253 | .dia_digestsize = SHA256_DIGEST_SIZE, | 257 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), |
254 | .dia_init = padlock_sha_init, | 258 | .cra_module = THIS_MODULE, |
255 | .dia_update = padlock_sha_update, | 259 | .cra_init = padlock_cra_init, |
256 | .dia_final = padlock_sha_final, | 260 | .cra_exit = padlock_cra_exit, |
257 | } | ||
258 | } | 261 | } |
259 | }; | 262 | }; |
260 | 263 | ||
@@ -272,11 +275,11 @@ static int __init padlock_init(void) | |||
272 | return -ENODEV; | 275 | return -ENODEV; |
273 | } | 276 | } |
274 | 277 | ||
275 | rc = crypto_register_alg(&sha1_alg); | 278 | rc = crypto_register_shash(&sha1_alg); |
276 | if (rc) | 279 | if (rc) |
277 | goto out; | 280 | goto out; |
278 | 281 | ||
279 | rc = crypto_register_alg(&sha256_alg); | 282 | rc = crypto_register_shash(&sha256_alg); |
280 | if (rc) | 283 | if (rc) |
281 | goto out_unreg1; | 284 | goto out_unreg1; |
282 | 285 | ||
@@ -285,7 +288,7 @@ static int __init padlock_init(void) | |||
285 | return 0; | 288 | return 0; |
286 | 289 | ||
287 | out_unreg1: | 290 | out_unreg1: |
288 | crypto_unregister_alg(&sha1_alg); | 291 | crypto_unregister_shash(&sha1_alg); |
289 | out: | 292 | out: |
290 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); | 293 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); |
291 | return rc; | 294 | return rc; |
@@ -293,8 +296,8 @@ out: | |||
293 | 296 | ||
294 | static void __exit padlock_fini(void) | 297 | static void __exit padlock_fini(void) |
295 | { | 298 | { |
296 | crypto_unregister_alg(&sha1_alg); | 299 | crypto_unregister_shash(&sha1_alg); |
297 | crypto_unregister_alg(&sha256_alg); | 300 | crypto_unregister_shash(&sha256_alg); |
298 | } | 301 | } |
299 | 302 | ||
300 | module_init(padlock_init); | 303 | module_init(padlock_init); |
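The two finup paths above share one piece of arithmetic: from the fallback's exported byte total they derive how many bytes of the last, possibly partial, block are still buffered (leftover) and how much room is left in it (space), and only then decide whether the trailing data is appended via the fallback or handed straight to the PadLock instruction. The standalone program below reproduces just that arithmetic; it is not driver code, and SHA1_BLOCK_SIZE is redefined locally so it compiles on its own.

#include <stdio.h>

#define SHA1_BLOCK_SIZE 64	/* local copy for illustration */

int main(void)
{
	/* values standing in for state.count, the byte total already
	 * fed to the fallback hash */
	unsigned long totals[] = { 0, 1, 63, 64, 65, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(totals) / sizeof(totals[0]); i++) {
		unsigned long total = totals[i];
		unsigned int leftover =
			((total - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
		unsigned int space = SHA1_BLOCK_SIZE - leftover;

		printf("state.count=%3lu  leftover=%2u  space=%2u\n",
		       total, leftover, space);
	}
	return 0;
}

Note that for a total of zero or an exact multiple of the block size the -1/+1 trick yields leftover == 64 and space == 0, so neither update branch runs and the new data goes directly to the rep xsha1/xsha256 instruction.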
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c70775fd3ce2..c47ffe8a73ef 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -86,6 +86,25 @@ struct talitos_request { | |||
86 | void *context; | 86 | void *context; |
87 | }; | 87 | }; |
88 | 88 | ||
89 | /* per-channel fifo management */ | ||
90 | struct talitos_channel { | ||
91 | /* request fifo */ | ||
92 | struct talitos_request *fifo; | ||
93 | |||
94 | /* number of requests pending in channel h/w fifo */ | ||
95 | atomic_t submit_count ____cacheline_aligned; | ||
96 | |||
97 | /* request submission (head) lock */ | ||
98 | spinlock_t head_lock ____cacheline_aligned; | ||
99 | /* index to next free descriptor request */ | ||
100 | int head; | ||
101 | |||
102 | /* request release (tail) lock */ | ||
103 | spinlock_t tail_lock ____cacheline_aligned; | ||
104 | /* index to next in-progress/done descriptor request */ | ||
105 | int tail; | ||
106 | }; | ||
107 | |||
89 | struct talitos_private { | 108 | struct talitos_private { |
90 | struct device *dev; | 109 | struct device *dev; |
91 | struct of_device *ofdev; | 110 | struct of_device *ofdev; |
@@ -101,15 +120,6 @@ struct talitos_private { | |||
101 | /* SEC Compatibility info */ | 120 | /* SEC Compatibility info */ |
102 | unsigned long features; | 121 | unsigned long features; |
103 | 122 | ||
104 | /* next channel to be assigned next incoming descriptor */ | ||
105 | atomic_t last_chan; | ||
106 | |||
107 | /* per-channel number of requests pending in channel h/w fifo */ | ||
108 | atomic_t *submit_count; | ||
109 | |||
110 | /* per-channel request fifo */ | ||
111 | struct talitos_request **fifo; | ||
112 | |||
113 | /* | 123 | /* |
114 | * length of the request fifo | 124 | * length of the request fifo |
115 | * fifo_len is chfifo_len rounded up to next power of 2 | 125 | * fifo_len is chfifo_len rounded up to next power of 2 |
@@ -117,15 +127,10 @@ struct talitos_private { | |||
117 | */ | 127 | */ |
118 | unsigned int fifo_len; | 128 | unsigned int fifo_len; |
119 | 129 | ||
120 | /* per-channel index to next free descriptor request */ | 130 | struct talitos_channel *chan; |
121 | int *head; | ||
122 | |||
123 | /* per-channel index to next in-progress/done descriptor request */ | ||
124 | int *tail; | ||
125 | 131 | ||
126 | /* per-channel request submission (head) and release (tail) locks */ | 132 | /* next channel to be assigned next incoming descriptor */ |
127 | spinlock_t *head_lock; | 133 | atomic_t last_chan ____cacheline_aligned; |
128 | spinlock_t *tail_lock; | ||
129 | 134 | ||
130 | /* request callback tasklet */ | 135 | /* request callback tasklet */ |
131 | struct tasklet_struct done_task; | 136 | struct tasklet_struct done_task; |
@@ -141,6 +146,12 @@ struct talitos_private { | |||
141 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | 146 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
142 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | 147 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
143 | 148 | ||
149 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | ||
150 | { | ||
151 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | ||
152 | talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr)); | ||
153 | } | ||
154 | |||
144 | /* | 155 | /* |
145 | * map virtual single (contiguous) pointer to h/w descriptor pointer | 156 | * map virtual single (contiguous) pointer to h/w descriptor pointer |
146 | */ | 157 | */ |
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev, | |||
150 | unsigned char extent, | 161 | unsigned char extent, |
151 | enum dma_data_direction dir) | 162 | enum dma_data_direction dir) |
152 | { | 163 | { |
164 | dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); | ||
165 | |||
153 | talitos_ptr->len = cpu_to_be16(len); | 166 | talitos_ptr->len = cpu_to_be16(len); |
154 | talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); | 167 | to_talitos_ptr(talitos_ptr, dma_addr); |
155 | talitos_ptr->j_extent = extent; | 168 | talitos_ptr->j_extent = extent; |
156 | } | 169 | } |
157 | 170 | ||
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch) | |||
182 | return -EIO; | 195 | return -EIO; |
183 | } | 196 | } |
184 | 197 | ||
185 | /* set done writeback and IRQ */ | 198 | /* set 36-bit addressing, done writeback enable and done IRQ enable */ |
186 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | | 199 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE | |
187 | TALITOS_CCCR_LO_CDIE); | 200 | TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); |
188 | 201 | ||
189 | /* and ICCR writeback, if available */ | 202 | /* and ICCR writeback, if available */ |
190 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) | 203 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) |
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
282 | /* emulate SEC's round-robin channel fifo polling scheme */ | 295 | /* emulate SEC's round-robin channel fifo polling scheme */ |
283 | ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); | 296 | ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); |
284 | 297 | ||
285 | spin_lock_irqsave(&priv->head_lock[ch], flags); | 298 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); |
286 | 299 | ||
287 | if (!atomic_inc_not_zero(&priv->submit_count[ch])) { | 300 | if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { |
288 | /* h/w fifo is full */ | 301 | /* h/w fifo is full */ |
289 | spin_unlock_irqrestore(&priv->head_lock[ch], flags); | 302 | spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); |
290 | return -EAGAIN; | 303 | return -EAGAIN; |
291 | } | 304 | } |
292 | 305 | ||
293 | head = priv->head[ch]; | 306 | head = priv->chan[ch].head; |
294 | request = &priv->fifo[ch][head]; | 307 | request = &priv->chan[ch].fifo[head]; |
295 | 308 | ||
296 | /* map descriptor and save caller data */ | 309 | /* map descriptor and save caller data */ |
297 | request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), | 310 | request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), |
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
300 | request->context = context; | 313 | request->context = context; |
301 | 314 | ||
302 | /* increment fifo head */ | 315 | /* increment fifo head */ |
303 | priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1); | 316 | priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1); |
304 | 317 | ||
305 | smp_wmb(); | 318 | smp_wmb(); |
306 | request->desc = desc; | 319 | request->desc = desc; |
307 | 320 | ||
308 | /* GO! */ | 321 | /* GO! */ |
309 | wmb(); | 322 | wmb(); |
310 | out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); | 323 | out_be32(priv->reg + TALITOS_FF(ch), |
324 | cpu_to_be32(upper_32_bits(request->dma_desc))); | ||
325 | out_be32(priv->reg + TALITOS_FF_LO(ch), | ||
326 | cpu_to_be32(lower_32_bits(request->dma_desc))); | ||
311 | 327 | ||
312 | spin_unlock_irqrestore(&priv->head_lock[ch], flags); | 328 | spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); |
313 | 329 | ||
314 | return -EINPROGRESS; | 330 | return -EINPROGRESS; |
315 | } | 331 | } |
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
324 | unsigned long flags; | 340 | unsigned long flags; |
325 | int tail, status; | 341 | int tail, status; |
326 | 342 | ||
327 | spin_lock_irqsave(&priv->tail_lock[ch], flags); | 343 | spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
328 | 344 | ||
329 | tail = priv->tail[ch]; | 345 | tail = priv->chan[ch].tail; |
330 | while (priv->fifo[ch][tail].desc) { | 346 | while (priv->chan[ch].fifo[tail].desc) { |
331 | request = &priv->fifo[ch][tail]; | 347 | request = &priv->chan[ch].fifo[tail]; |
332 | 348 | ||
333 | /* descriptors with their done bits set don't get the error */ | 349 | /* descriptors with their done bits set don't get the error */ |
334 | rmb(); | 350 | rmb(); |
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
354 | request->desc = NULL; | 370 | request->desc = NULL; |
355 | 371 | ||
356 | /* increment fifo tail */ | 372 | /* increment fifo tail */ |
357 | priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); | 373 | priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1); |
358 | 374 | ||
359 | spin_unlock_irqrestore(&priv->tail_lock[ch], flags); | 375 | spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); |
360 | 376 | ||
361 | atomic_dec(&priv->submit_count[ch]); | 377 | atomic_dec(&priv->chan[ch].submit_count); |
362 | 378 | ||
363 | saved_req.callback(dev, saved_req.desc, saved_req.context, | 379 | saved_req.callback(dev, saved_req.desc, saved_req.context, |
364 | status); | 380 | status); |
365 | /* channel may resume processing in single desc error case */ | 381 | /* channel may resume processing in single desc error case */ |
366 | if (error && !reset_ch && status == error) | 382 | if (error && !reset_ch && status == error) |
367 | return; | 383 | return; |
368 | spin_lock_irqsave(&priv->tail_lock[ch], flags); | 384 | spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
369 | tail = priv->tail[ch]; | 385 | tail = priv->chan[ch].tail; |
370 | } | 386 | } |
371 | 387 | ||
372 | spin_unlock_irqrestore(&priv->tail_lock[ch], flags); | 388 | spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); |
373 | } | 389 | } |
374 | 390 | ||
375 | /* | 391 | /* |
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data) | |||
397 | static struct talitos_desc *current_desc(struct device *dev, int ch) | 413 | static struct talitos_desc *current_desc(struct device *dev, int ch) |
398 | { | 414 | { |
399 | struct talitos_private *priv = dev_get_drvdata(dev); | 415 | struct talitos_private *priv = dev_get_drvdata(dev); |
400 | int tail = priv->tail[ch]; | 416 | int tail = priv->chan[ch].tail; |
401 | dma_addr_t cur_desc; | 417 | dma_addr_t cur_desc; |
402 | 418 | ||
403 | cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); | 419 | cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); |
404 | 420 | ||
405 | while (priv->fifo[ch][tail].dma_desc != cur_desc) { | 421 | while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) { |
406 | tail = (tail + 1) & (priv->fifo_len - 1); | 422 | tail = (tail + 1) & (priv->fifo_len - 1); |
407 | if (tail == priv->tail[ch]) { | 423 | if (tail == priv->chan[ch].tail) { |
408 | dev_err(dev, "couldn't locate current descriptor\n"); | 424 | dev_err(dev, "couldn't locate current descriptor\n"); |
409 | return NULL; | 425 | return NULL; |
410 | } | 426 | } |
411 | } | 427 | } |
412 | 428 | ||
413 | return priv->fifo[ch][tail].desc; | 429 | return priv->chan[ch].fifo[tail].desc; |
414 | } | 430 | } |
415 | 431 | ||
416 | /* | 432 | /* |
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
929 | int n_sg = sg_count; | 945 | int n_sg = sg_count; |
930 | 946 | ||
931 | while (n_sg--) { | 947 | while (n_sg--) { |
932 | link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); | 948 | to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); |
933 | link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); | 949 | link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); |
934 | link_tbl_ptr->j_extent = 0; | 950 | link_tbl_ptr->j_extent = 0; |
935 | link_tbl_ptr++; | 951 | link_tbl_ptr++; |
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
970 | struct talitos_desc *desc = &edesc->desc; | 986 | struct talitos_desc *desc = &edesc->desc; |
971 | unsigned int cryptlen = areq->cryptlen; | 987 | unsigned int cryptlen = areq->cryptlen; |
972 | unsigned int authsize = ctx->authsize; | 988 | unsigned int authsize = ctx->authsize; |
973 | unsigned int ivsize; | 989 | unsigned int ivsize = crypto_aead_ivsize(aead); |
974 | int sg_count, ret; | 990 | int sg_count, ret; |
975 | int sg_link_tbl_len; | 991 | int sg_link_tbl_len; |
976 | 992 | ||
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
978 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 994 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, |
979 | 0, DMA_TO_DEVICE); | 995 | 0, DMA_TO_DEVICE); |
980 | /* hmac data */ | 996 | /* hmac data */ |
981 | map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - | 997 | map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize, |
982 | sg_virt(areq->assoc), sg_virt(areq->assoc), 0, | 998 | sg_virt(areq->assoc), 0, DMA_TO_DEVICE); |
983 | DMA_TO_DEVICE); | ||
984 | /* cipher iv */ | 999 | /* cipher iv */ |
985 | ivsize = crypto_aead_ivsize(aead); | ||
986 | map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, | 1000 | map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, |
987 | DMA_TO_DEVICE); | 1001 | DMA_TO_DEVICE); |
988 | 1002 | ||
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1006 | edesc->src_is_chained); | 1020 | edesc->src_is_chained); |
1007 | 1021 | ||
1008 | if (sg_count == 1) { | 1022 | if (sg_count == 1) { |
1009 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1023 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); |
1010 | } else { | 1024 | } else { |
1011 | sg_link_tbl_len = cryptlen; | 1025 | sg_link_tbl_len = cryptlen; |
1012 | 1026 | ||
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1017 | &edesc->link_tbl[0]); | 1031 | &edesc->link_tbl[0]); |
1018 | if (sg_count > 1) { | 1032 | if (sg_count > 1) { |
1019 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1033 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1020 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); | 1034 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); |
1021 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1035 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1022 | edesc->dma_len, | 1036 | edesc->dma_len, |
1023 | DMA_BIDIRECTIONAL); | 1037 | DMA_BIDIRECTIONAL); |
1024 | } else { | 1038 | } else { |
1025 | /* Only one segment now, so no link tbl needed */ | 1039 | /* Only one segment now, so no link tbl needed */ |
1026 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> | 1040 | to_talitos_ptr(&desc->ptr[4], |
1027 | src)); | 1041 | sg_dma_address(areq->src)); |
1028 | } | 1042 | } |
1029 | } | 1043 | } |
1030 | 1044 | ||
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1039 | edesc->dst_is_chained); | 1053 | edesc->dst_is_chained); |
1040 | 1054 | ||
1041 | if (sg_count == 1) { | 1055 | if (sg_count == 1) { |
1042 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 1056 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); |
1043 | } else { | 1057 | } else { |
1044 | struct talitos_ptr *link_tbl_ptr = | 1058 | struct talitos_ptr *link_tbl_ptr = |
1045 | &edesc->link_tbl[edesc->src_nents + 1]; | 1059 | &edesc->link_tbl[edesc->src_nents + 1]; |
1046 | 1060 | ||
1047 | desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) | 1061 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + |
1048 | edesc->dma_link_tbl + | 1062 | (edesc->src_nents + 1) * |
1049 | edesc->src_nents + 1); | 1063 | sizeof(struct talitos_ptr)); |
1050 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 1064 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
1051 | link_tbl_ptr); | 1065 | link_tbl_ptr); |
1052 | 1066 | ||
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1059 | link_tbl_ptr->len = cpu_to_be16(authsize); | 1073 | link_tbl_ptr->len = cpu_to_be16(authsize); |
1060 | 1074 | ||
1061 | /* icv data follows link tables */ | 1075 | /* icv data follows link tables */ |
1062 | link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) | 1076 | to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl + |
1063 | edesc->dma_link_tbl + | 1077 | (edesc->src_nents + edesc->dst_nents + 2) * |
1064 | edesc->src_nents + | 1078 | sizeof(struct talitos_ptr)); |
1065 | edesc->dst_nents + 2); | ||
1066 | |||
1067 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1079 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1068 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1080 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
1069 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1081 | edesc->dma_len, DMA_BIDIRECTIONAL); |
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1338 | 1350 | ||
1339 | /* first DWORD empty */ | 1351 | /* first DWORD empty */ |
1340 | desc->ptr[0].len = 0; | 1352 | desc->ptr[0].len = 0; |
1341 | desc->ptr[0].ptr = 0; | 1353 | to_talitos_ptr(&desc->ptr[0], 0); |
1342 | desc->ptr[0].j_extent = 0; | 1354 | desc->ptr[0].j_extent = 0; |
1343 | 1355 | ||
1344 | /* cipher iv */ | 1356 | /* cipher iv */ |
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1362 | edesc->src_is_chained); | 1374 | edesc->src_is_chained); |
1363 | 1375 | ||
1364 | if (sg_count == 1) { | 1376 | if (sg_count == 1) { |
1365 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1377 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); |
1366 | } else { | 1378 | } else { |
1367 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | 1379 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, |
1368 | &edesc->link_tbl[0]); | 1380 | &edesc->link_tbl[0]); |
1369 | if (sg_count > 1) { | 1381 | if (sg_count > 1) { |
1382 | to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); | ||
1370 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1383 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1371 | desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); | ||
1372 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1384 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1373 | edesc->dma_len, | 1385 | edesc->dma_len, |
1374 | DMA_BIDIRECTIONAL); | 1386 | DMA_BIDIRECTIONAL); |
1375 | } else { | 1387 | } else { |
1376 | /* Only one segment now, so no link tbl needed */ | 1388 | /* Only one segment now, so no link tbl needed */ |
1377 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> | 1389 | to_talitos_ptr(&desc->ptr[3], |
1378 | src)); | 1390 | sg_dma_address(areq->src)); |
1379 | } | 1391 | } |
1380 | } | 1392 | } |
1381 | 1393 | ||
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1390 | edesc->dst_is_chained); | 1402 | edesc->dst_is_chained); |
1391 | 1403 | ||
1392 | if (sg_count == 1) { | 1404 | if (sg_count == 1) { |
1393 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 1405 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); |
1394 | } else { | 1406 | } else { |
1395 | struct talitos_ptr *link_tbl_ptr = | 1407 | struct talitos_ptr *link_tbl_ptr = |
1396 | &edesc->link_tbl[edesc->src_nents + 1]; | 1408 | &edesc->link_tbl[edesc->src_nents + 1]; |
1397 | 1409 | ||
1410 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + | ||
1411 | (edesc->src_nents + 1) * | ||
1412 | sizeof(struct talitos_ptr)); | ||
1398 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1413 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1399 | desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *) | ||
1400 | edesc->dma_link_tbl + | ||
1401 | edesc->src_nents + 1); | ||
1402 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 1414 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
1403 | link_tbl_ptr); | 1415 | link_tbl_ptr); |
1404 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1416 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1411 | 1423 | ||
1412 | /* last DWORD empty */ | 1424 | /* last DWORD empty */ |
1413 | desc->ptr[6].len = 0; | 1425 | desc->ptr[6].len = 0; |
1414 | desc->ptr[6].ptr = 0; | 1426 | to_talitos_ptr(&desc->ptr[6], 0); |
1415 | desc->ptr[6].j_extent = 0; | 1427 | desc->ptr[6].j_extent = 0; |
1416 | 1428 | ||
1417 | ret = talitos_submit(dev, desc, callback, areq); | 1429 | ret = talitos_submit(dev, desc, callback, areq); |
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev) | |||
1742 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) | 1754 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) |
1743 | talitos_unregister_rng(dev); | 1755 | talitos_unregister_rng(dev); |
1744 | 1756 | ||
1745 | kfree(priv->submit_count); | 1757 | for (i = 0; i < priv->num_channels; i++) |
1746 | kfree(priv->tail); | 1758 | if (priv->chan[i].fifo) |
1747 | kfree(priv->head); | 1759 | kfree(priv->chan[i].fifo); |
1748 | |||
1749 | if (priv->fifo) | ||
1750 | for (i = 0; i < priv->num_channels; i++) | ||
1751 | kfree(priv->fifo[i]); | ||
1752 | 1760 | ||
1753 | kfree(priv->fifo); | 1761 | kfree(priv->chan); |
1754 | kfree(priv->head_lock); | ||
1755 | kfree(priv->tail_lock); | ||
1756 | 1762 | ||
1757 | if (priv->irq != NO_IRQ) { | 1763 | if (priv->irq != NO_IRQ) { |
1758 | free_irq(priv->irq, dev); | 1764 | free_irq(priv->irq, dev); |
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev, | |||
1872 | if (of_device_is_compatible(np, "fsl,sec2.1")) | 1878 | if (of_device_is_compatible(np, "fsl,sec2.1")) |
1873 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; | 1879 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; |
1874 | 1880 | ||
1875 | priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1881 | priv->chan = kzalloc(sizeof(struct talitos_channel) * |
1876 | GFP_KERNEL); | 1882 | priv->num_channels, GFP_KERNEL); |
1877 | priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1883 | if (!priv->chan) { |
1878 | GFP_KERNEL); | 1884 | dev_err(dev, "failed to allocate channel management space\n"); |
1879 | if (!priv->head_lock || !priv->tail_lock) { | ||
1880 | dev_err(dev, "failed to allocate fifo locks\n"); | ||
1881 | err = -ENOMEM; | 1885 | err = -ENOMEM; |
1882 | goto err_out; | 1886 | goto err_out; |
1883 | } | 1887 | } |
1884 | 1888 | ||
1885 | for (i = 0; i < priv->num_channels; i++) { | 1889 | for (i = 0; i < priv->num_channels; i++) { |
1886 | spin_lock_init(&priv->head_lock[i]); | 1890 | spin_lock_init(&priv->chan[i].head_lock); |
1887 | spin_lock_init(&priv->tail_lock[i]); | 1891 | spin_lock_init(&priv->chan[i].tail_lock); |
1888 | } | ||
1889 | |||
1890 | priv->fifo = kmalloc(sizeof(struct talitos_request *) * | ||
1891 | priv->num_channels, GFP_KERNEL); | ||
1892 | if (!priv->fifo) { | ||
1893 | dev_err(dev, "failed to allocate request fifo\n"); | ||
1894 | err = -ENOMEM; | ||
1895 | goto err_out; | ||
1896 | } | 1892 | } |
1897 | 1893 | ||
1898 | priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); | 1894 | priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); |
1899 | 1895 | ||
1900 | for (i = 0; i < priv->num_channels; i++) { | 1896 | for (i = 0; i < priv->num_channels; i++) { |
1901 | priv->fifo[i] = kzalloc(sizeof(struct talitos_request) * | 1897 | priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) * |
1902 | priv->fifo_len, GFP_KERNEL); | 1898 | priv->fifo_len, GFP_KERNEL); |
1903 | if (!priv->fifo[i]) { | 1899 | if (!priv->chan[i].fifo) { |
1904 | dev_err(dev, "failed to allocate request fifo %d\n", i); | 1900 | dev_err(dev, "failed to allocate request fifo %d\n", i); |
1905 | err = -ENOMEM; | 1901 | err = -ENOMEM; |
1906 | goto err_out; | 1902 | goto err_out; |
1907 | } | 1903 | } |
1908 | } | 1904 | } |
1909 | 1905 | ||
1910 | priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels, | ||
1911 | GFP_KERNEL); | ||
1912 | if (!priv->submit_count) { | ||
1913 | dev_err(dev, "failed to allocate fifo submit count space\n"); | ||
1914 | err = -ENOMEM; | ||
1915 | goto err_out; | ||
1916 | } | ||
1917 | for (i = 0; i < priv->num_channels; i++) | 1906 | for (i = 0; i < priv->num_channels; i++) |
1918 | atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); | 1907 | atomic_set(&priv->chan[i].submit_count, |
1908 | -(priv->chfifo_len - 1)); | ||
1919 | 1909 | ||
1920 | priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); | 1910 | dma_set_mask(dev, DMA_BIT_MASK(36)); |
1921 | priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); | ||
1922 | if (!priv->head || !priv->tail) { | ||
1923 | dev_err(dev, "failed to allocate request index space\n"); | ||
1924 | err = -ENOMEM; | ||
1925 | goto err_out; | ||
1926 | } | ||
1927 | 1911 | ||
1928 | /* reset and initialize the h/w */ | 1912 | /* reset and initialize the h/w */ |
1929 | err = init_device(dev); | 1913 | err = init_device(dev); |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 575981f0cfda..ff5a1450e145 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -57,6 +57,7 @@ | |||
57 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ | 57 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ |
58 | #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) | 58 | #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) |
59 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ | 59 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ |
60 | #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ | ||
60 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ | 61 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ |
61 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ | 62 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ |
62 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ | 63 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ |
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 110e731f5574..1c0b504a42f3 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, | |||
196 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 196 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
197 | irm_id, generation, SCODE_100, | 197 | irm_id, generation, SCODE_100, |
198 | CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, | 198 | CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, |
199 | data, sizeof(data))) { | 199 | data, 8)) { |
200 | case RCODE_GENERATION: | 200 | case RCODE_GENERATION: |
201 | /* A generation change frees all bandwidth. */ | 201 | /* A generation change frees all bandwidth. */ |
202 | return allocate ? -EAGAIN : bandwidth; | 202 | return allocate ? -EAGAIN : bandwidth; |
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, | |||
233 | data[1] = old ^ c; | 233 | data[1] = old ^ c; |
234 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 234 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
235 | irm_id, generation, SCODE_100, | 235 | irm_id, generation, SCODE_100, |
236 | offset, data, sizeof(data))) { | 236 | offset, data, 8)) { |
237 | case RCODE_GENERATION: | 237 | case RCODE_GENERATION: |
238 | /* A generation change frees all channels. */ | 238 | /* A generation change frees all channels. */ |
239 | return allocate ? -EAGAIN : i; | 239 | return allocate ? -EAGAIN : i; |
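Both hunks above replace sizeof(data) with a literal 8. Presumably data is (or becomes elsewhere in this series) a pointer supplied by the caller rather than a local two-word array, in which case sizeof(data) would silently turn into the size of a pointer; that caller-side change is not visible here, so treat the rationale as an assumption. The standalone snippet below shows the underlying pitfall.

#include <stdio.h>

static void takes_pointer(unsigned int *data)
{
	/* size of a pointer, typically 4 or 8 */
	printf("sizeof as pointer: %zu\n", sizeof(data));
}

int main(void)
{
	unsigned int data[2] = { 0, 0 };

	/* size of the array, 8 on common systems */
	printf("sizeof as array:   %zu\n", sizeof(data));
	takes_pointer(data);
	return 0;
}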
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index ecddd11b797a..76b321bb73f9 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/moduleparam.h> | 35 | #include <linux/moduleparam.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/pci_ids.h> | ||
37 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
38 | #include <linux/string.h> | 39 | #include <linux/string.h> |
39 | 40 | ||
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev) | |||
2372 | #define ohci_pmac_off(dev) | 2373 | #define ohci_pmac_off(dev) |
2373 | #endif /* CONFIG_PPC_PMAC */ | 2374 | #endif /* CONFIG_PPC_PMAC */ |
2374 | 2375 | ||
2376 | #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT | ||
2377 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 | ||
2378 | |||
2375 | static int __devinit pci_probe(struct pci_dev *dev, | 2379 | static int __devinit pci_probe(struct pci_dev *dev, |
2376 | const struct pci_device_id *ent) | 2380 | const struct pci_device_id *ent) |
2377 | { | 2381 | { |
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2422 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | 2426 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
2423 | ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; | 2427 | ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; |
2424 | 2428 | ||
2429 | /* dual-buffer mode is broken if more than one IR context is active */ | ||
2430 | if (dev->vendor == PCI_VENDOR_ID_AGERE && | ||
2431 | dev->device == PCI_DEVICE_ID_AGERE_FW643) | ||
2432 | ohci->use_dualbuffer = false; | ||
2433 | |||
2434 | /* dual-buffer mode is broken */ | ||
2435 | if (dev->vendor == PCI_VENDOR_ID_RICOH && | ||
2436 | dev->device == PCI_DEVICE_ID_RICOH_R5C832) | ||
2437 | ohci->use_dualbuffer = false; | ||
2438 | |||
2425 | /* x86-32 currently doesn't use highmem for dma_alloc_coherent */ | 2439 | /* x86-32 currently doesn't use highmem for dma_alloc_coherent */ |
2426 | #if !defined(CONFIG_X86_32) | 2440 | #if !defined(CONFIG_X86_32) |
2427 | /* dual-buffer mode is broken with descriptor addresses above 2G */ | 2441 | /* dual-buffer mode is broken with descriptor addresses above 2G */ |
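[Editor's note] The pci_probe() change above disables dual-buffer isochronous reception for two specific controllers with back-to-back vendor/device comparisons. A hedged sketch of the same check written as a quirk table follows; the helper name and table are made up, PCI_VENDOR_ID_AGERE and PCI_DEVICE_ID_AGERE_FW643 are the defines added in the hunk, and the Ricoh IDs are the <linux/pci_ids.h> constants the added code already uses.

#include <linux/pci.h>

/* Hypothetical table-driven form of the two inline checks above;
 * assumes the PCI_VENDOR_ID_AGERE / PCI_DEVICE_ID_AGERE_FW643 defines
 * from the hunk are in scope. */
static const struct pci_device_id broken_dualbuffer_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AGERE, PCI_DEVICE_ID_AGERE_FW643) },
	{ PCI_DEVICE(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832) },
	{ }
};

static bool ohci_dualbuffer_broken(struct pci_dev *dev)
{
	return pci_match_id(broken_dualbuffer_ids, dev) != NULL;
}

A table keeps future quirks (and their removal once fixed silicon is identified) in one place instead of a growing chain of if-statements.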
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 8d51568ee143..e5df822a8130 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | |||
456 | } | 456 | } |
457 | spin_unlock_irqrestore(&card->lock, flags); | 457 | spin_unlock_irqrestore(&card->lock, flags); |
458 | 458 | ||
459 | if (&orb->link != &lu->orb_list) | 459 | if (&orb->link != &lu->orb_list) { |
460 | orb->callback(orb, &status); | 460 | orb->callback(orb, &status); |
461 | else | 461 | kref_put(&orb->kref, free_orb); |
462 | } else { | ||
462 | fw_error("status write for unknown orb\n"); | 463 | fw_error("status write for unknown orb\n"); |
463 | 464 | } | |
464 | kref_put(&orb->kref, free_orb); | ||
465 | 465 | ||
466 | fw_send_response(card, request, RCODE_COMPLETE); | 466 | fw_send_response(card, request, RCODE_COMPLETE); |
467 | } | 467 | } |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 24c84ae81527..938100f14b16 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -568,35 +568,76 @@ const struct dmi_device * dmi_find_device(int type, const char *name, | |||
568 | EXPORT_SYMBOL(dmi_find_device); | 568 | EXPORT_SYMBOL(dmi_find_device); |
569 | 569 | ||
570 | /** | 570 | /** |
571 | * dmi_get_year - Return year of a DMI date | 571 | * dmi_get_date - parse a DMI date |
572 | * @field: data index (like dmi_get_system_info) | 572 | * @field: data index (see enum dmi_field) |
573 | * @yearp: optional out parameter for the year | ||
574 | * @monthp: optional out parameter for the month | ||
575 | * @dayp: optional out parameter for the day | ||
573 | * | 576 | * |
574 | * Returns -1 when the field doesn't exist. 0 when it is broken. | 577 | * The date field is assumed to be in the form resembling |
578 | * [mm[/dd]]/yy[yy] and the result is stored in the out | ||
579 | * parameters any or all of which can be omitted. | ||
580 | * | ||
581 | * If the field doesn't exist, all out parameters are set to zero | ||
582 | * and false is returned. Otherwise, true is returned with any | ||
583 | * invalid part of date set to zero. | ||
584 | * | ||
585 | * On return, year, month and day are guaranteed to be in the | ||
586 | * range of [0,9999], [0,12] and [0,31] respectively. | ||
575 | */ | 587 | */ |
576 | int dmi_get_year(int field) | 588 | bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) |
577 | { | 589 | { |
578 | int year; | 590 | int year = 0, month = 0, day = 0; |
579 | const char *s = dmi_get_system_info(field); | 591 | bool exists; |
592 | const char *s, *y; | ||
593 | char *e; | ||
580 | 594 | ||
581 | if (!s) | 595 | s = dmi_get_system_info(field); |
582 | return -1; | 596 | exists = s; |
583 | if (*s == '\0') | 597 | if (!exists) |
584 | return 0; | 598 | goto out; |
585 | s = strrchr(s, '/'); | ||
586 | if (!s) | ||
587 | return 0; | ||
588 | 599 | ||
589 | s += 1; | 600 | /* |
590 | year = simple_strtoul(s, NULL, 0); | 601 | * Determine year first. We assume the date string resembles |
591 | if (year && year < 100) { /* 2-digit year */ | 602 | * mm/dd/yy[yy] but the original code extracted only the year |
603 | * from the end. Keep the behavior in the spirit of no | ||
604 | * surprises. | ||
605 | */ | ||
606 | y = strrchr(s, '/'); | ||
607 | if (!y) | ||
608 | goto out; | ||
609 | |||
610 | y++; | ||
611 | year = simple_strtoul(y, &e, 10); | ||
612 | if (y != e && year < 100) { /* 2-digit year */ | ||
592 | year += 1900; | 613 | year += 1900; |
593 | if (year < 1996) /* no dates < spec 1.0 */ | 614 | if (year < 1996) /* no dates < spec 1.0 */ |
594 | year += 100; | 615 | year += 100; |
595 | } | 616 | } |
617 | if (year > 9999) /* year should fit in %04d */ | ||
618 | year = 0; | ||
619 | |||
620 | /* parse the mm and dd */ | ||
621 | month = simple_strtoul(s, &e, 10); | ||
622 | if (s == e || *e != '/' || !month || month > 12) { | ||
623 | month = 0; | ||
624 | goto out; | ||
625 | } | ||
596 | 626 | ||
597 | return year; | 627 | s = e + 1; |
628 | day = simple_strtoul(s, &e, 10); | ||
629 | if (s == y || s == e || *e != '/' || day > 31) | ||
630 | day = 0; | ||
631 | out: | ||
632 | if (yearp) | ||
633 | *yearp = year; | ||
634 | if (monthp) | ||
635 | *monthp = month; | ||
636 | if (dayp) | ||
637 | *dayp = day; | ||
638 | return exists; | ||
598 | } | 639 | } |
599 | EXPORT_SYMBOL(dmi_get_year); | 640 | EXPORT_SYMBOL(dmi_get_date); |
600 | 641 | ||
601 | /** | 642 | /** |
602 | * dmi_walk - Walk the DMI table and get called back for every record | 643 | * dmi_walk - Walk the DMI table and get called back for every record |
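[Editor's note] The dmi_get_date() kernel-doc added above spells out the parsing contract: missing field returns false, unparsable parts come back as 0, and any out parameter may be NULL. A minimal caller sketch in that spirit follows. Only dmi_get_date() and DMI_BIOS_DATE come from the patch; the helper name, cutoff parameters and the "assume recent" policy are illustrative.

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/types.h>

/* Returns true only if the BIOS date is known and earlier than the
 * given cutoff.  Per the kernel-doc above, a missing DMI field makes
 * dmi_get_date() return false, and an unparsable year is reported as 0. */
static bool __init bios_date_before(int cutoff_year, int cutoff_month)
{
	int year, month;

	if (!dmi_get_date(DMI_BIOS_DATE, &year, &month, NULL))
		return false;		/* no DMI date: assume recent */
	if (!year)
		return false;		/* year unparsable */
	if (year != cutoff_year)
		return year < cutoff_year;
	return month && month < cutoff_month;
}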
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 33be210d6723..2f631c75f704 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -258,31 +258,6 @@ void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) | |||
258 | EXPORT_SYMBOL(drm_mode_object_find); | 258 | EXPORT_SYMBOL(drm_mode_object_find); |
259 | 259 | ||
260 | /** | 260 | /** |
261 | * drm_crtc_from_fb - find the CRTC structure associated with an fb | ||
262 | * @dev: DRM device | ||
263 | * @fb: framebuffer in question | ||
264 | * | ||
265 | * LOCKING: | ||
266 | * Caller must hold mode_config lock. | ||
267 | * | ||
268 | * Find CRTC in the mode_config structure that matches @fb. | ||
269 | * | ||
270 | * RETURNS: | ||
271 | * Pointer to the CRTC or NULL if it wasn't found. | ||
272 | */ | ||
273 | struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, | ||
274 | struct drm_framebuffer *fb) | ||
275 | { | ||
276 | struct drm_crtc *crtc; | ||
277 | |||
278 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
279 | if (crtc->fb == fb) | ||
280 | return crtc; | ||
281 | } | ||
282 | return NULL; | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | * drm_framebuffer_init - initialize a framebuffer | 261 | * drm_framebuffer_init - initialize a framebuffer |
287 | * @dev: DRM device | 262 | * @dev: DRM device |
288 | * | 263 | * |
@@ -328,11 +303,20 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) | |||
328 | { | 303 | { |
329 | struct drm_device *dev = fb->dev; | 304 | struct drm_device *dev = fb->dev; |
330 | struct drm_crtc *crtc; | 305 | struct drm_crtc *crtc; |
306 | struct drm_mode_set set; | ||
307 | int ret; | ||
331 | 308 | ||
332 | /* remove from any CRTC */ | 309 | /* remove from any CRTC */ |
333 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 310 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
334 | if (crtc->fb == fb) | 311 | if (crtc->fb == fb) { |
335 | crtc->fb = NULL; | 312 | /* should turn off the crtc */ |
313 | memset(&set, 0, sizeof(struct drm_mode_set)); | ||
314 | set.crtc = crtc; | ||
315 | set.fb = NULL; | ||
316 | ret = crtc->funcs->set_config(&set); | ||
317 | if (ret) | ||
318 | DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); | ||
319 | } | ||
336 | } | 320 | } |
337 | 321 | ||
338 | drm_mode_object_put(dev, &fb->base); | 322 | drm_mode_object_put(dev, &fb->base); |
@@ -1511,7 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1511 | set.mode = mode; | 1495 | set.mode = mode; |
1512 | set.connectors = connector_set; | 1496 | set.connectors = connector_set; |
1513 | set.num_connectors = crtc_req->count_connectors; | 1497 | set.num_connectors = crtc_req->count_connectors; |
1514 | set.fb =fb; | 1498 | set.fb = fb; |
1515 | ret = crtc->funcs->set_config(&set); | 1499 | ret = crtc->funcs->set_config(&set); |
1516 | 1500 | ||
1517 | out: | 1501 | out: |
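[Editor's note] The drm_framebuffer_cleanup() change above no longer clears crtc->fb silently; it builds an empty drm_mode_set and calls the CRTC's set_config() hook so the pipe is shut down through the normal modeset path. A sketch of that step as a stand-alone helper, assuming the drm_crtc.h declarations of the era; the helper name is made up.

#include "drmP.h"
#include "drm_crtc.h"

/* Hypothetical helper equivalent to the inline code added above:
 * request an empty configuration (no mode, no fb, no connectors),
 * which turns the CRTC off via its own set_config() implementation. */
static int drm_crtc_force_disable(struct drm_crtc *crtc)
{
	struct drm_mode_set set;

	memset(&set, 0, sizeof(set));
	set.crtc = crtc;
	return crtc->funcs->set_config(&set);
}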
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 80cc6d06d61b..7f2728bbc16c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -502,12 +502,40 @@ static int add_detailed_info(struct drm_connector *connector, | |||
502 | struct detailed_non_pixel *data = &timing->data.other_data; | 502 | struct detailed_non_pixel *data = &timing->data.other_data; |
503 | struct drm_display_mode *newmode; | 503 | struct drm_display_mode *newmode; |
504 | 504 | ||
505 | /* EDID up to and including 1.2 may put monitor info here */ | 505 | /* X server check is version 1.1 or higher */ |
506 | if (edid->version == 1 && edid->revision < 3) | 506 | if (edid->version == 1 && edid->revision >= 1 && |
507 | continue; | 507 | !timing->pixel_clock) { |
508 | 508 | /* Other timing or info */ | |
509 | /* Detailed mode timing */ | 509 | switch (data->type) { |
510 | if (timing->pixel_clock) { | 510 | case EDID_DETAIL_MONITOR_SERIAL: |
511 | break; | ||
512 | case EDID_DETAIL_MONITOR_STRING: | ||
513 | break; | ||
514 | case EDID_DETAIL_MONITOR_RANGE: | ||
515 | /* Get monitor range data */ | ||
516 | break; | ||
517 | case EDID_DETAIL_MONITOR_NAME: | ||
518 | break; | ||
519 | case EDID_DETAIL_MONITOR_CPDATA: | ||
520 | break; | ||
521 | case EDID_DETAIL_STD_MODES: | ||
522 | /* Five modes per detailed section */ | ||
523 | for (j = 0; j < 5; i++) { | ||
524 | struct std_timing *std; | ||
525 | struct drm_display_mode *newmode; | ||
526 | |||
527 | std = &data->data.timings[j]; | ||
528 | newmode = drm_mode_std(dev, std); | ||
529 | if (newmode) { | ||
530 | drm_mode_probed_add(connector, newmode); | ||
531 | modes++; | ||
532 | } | ||
533 | } | ||
534 | break; | ||
535 | default: | ||
536 | break; | ||
537 | } | ||
538 | } else { | ||
511 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | 539 | newmode = drm_mode_detailed(dev, edid, timing, quirks); |
512 | if (!newmode) | 540 | if (!newmode) |
513 | continue; | 541 | continue; |
@@ -518,38 +546,6 @@ static int add_detailed_info(struct drm_connector *connector, | |||
518 | drm_mode_probed_add(connector, newmode); | 546 | drm_mode_probed_add(connector, newmode); |
519 | 547 | ||
520 | modes++; | 548 | modes++; |
521 | continue; | ||
522 | } | ||
523 | |||
524 | /* Other timing or info */ | ||
525 | switch (data->type) { | ||
526 | case EDID_DETAIL_MONITOR_SERIAL: | ||
527 | break; | ||
528 | case EDID_DETAIL_MONITOR_STRING: | ||
529 | break; | ||
530 | case EDID_DETAIL_MONITOR_RANGE: | ||
531 | /* Get monitor range data */ | ||
532 | break; | ||
533 | case EDID_DETAIL_MONITOR_NAME: | ||
534 | break; | ||
535 | case EDID_DETAIL_MONITOR_CPDATA: | ||
536 | break; | ||
537 | case EDID_DETAIL_STD_MODES: | ||
538 | /* Five modes per detailed section */ | ||
539 | for (j = 0; j < 5; i++) { | ||
540 | struct std_timing *std; | ||
541 | struct drm_display_mode *newmode; | ||
542 | |||
543 | std = &data->data.timings[j]; | ||
544 | newmode = drm_mode_std(dev, std); | ||
545 | if (newmode) { | ||
546 | drm_mode_probed_add(connector, newmode); | ||
547 | modes++; | ||
548 | } | ||
549 | } | ||
550 | break; | ||
551 | default: | ||
552 | break; | ||
553 | } | 549 | } |
554 | } | 550 | } |
555 | 551 | ||
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 85ec31b3ff00..f7a615b80c70 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c | |||
@@ -22,44 +22,50 @@ | |||
22 | #define to_drm_minor(d) container_of(d, struct drm_minor, kdev) | 22 | #define to_drm_minor(d) container_of(d, struct drm_minor, kdev) |
23 | #define to_drm_connector(d) container_of(d, struct drm_connector, kdev) | 23 | #define to_drm_connector(d) container_of(d, struct drm_connector, kdev) |
24 | 24 | ||
25 | static struct device_type drm_sysfs_device_minor = { | ||
26 | .name = "drm_minor" | ||
27 | }; | ||
28 | |||
25 | /** | 29 | /** |
26 | * drm_sysfs_suspend - DRM class suspend hook | 30 | * drm_class_suspend - DRM class suspend hook |
27 | * @dev: Linux device to suspend | 31 | * @dev: Linux device to suspend |
28 | * @state: power state to enter | 32 | * @state: power state to enter |
29 | * | 33 | * |
30 | * Just figures out what the actual struct drm_device associated with | 34 | * Just figures out what the actual struct drm_device associated with |
31 | * @dev is and calls its suspend hook, if present. | 35 | * @dev is and calls its suspend hook, if present. |
32 | */ | 36 | */ |
33 | static int drm_sysfs_suspend(struct device *dev, pm_message_t state) | 37 | static int drm_class_suspend(struct device *dev, pm_message_t state) |
34 | { | 38 | { |
35 | struct drm_minor *drm_minor = to_drm_minor(dev); | 39 | if (dev->type == &drm_sysfs_device_minor) { |
36 | struct drm_device *drm_dev = drm_minor->dev; | 40 | struct drm_minor *drm_minor = to_drm_minor(dev); |
37 | 41 | struct drm_device *drm_dev = drm_minor->dev; | |
38 | if (drm_minor->type == DRM_MINOR_LEGACY && | 42 | |
39 | !drm_core_check_feature(drm_dev, DRIVER_MODESET) && | 43 | if (drm_minor->type == DRM_MINOR_LEGACY && |
40 | drm_dev->driver->suspend) | 44 | !drm_core_check_feature(drm_dev, DRIVER_MODESET) && |
41 | return drm_dev->driver->suspend(drm_dev, state); | 45 | drm_dev->driver->suspend) |
42 | 46 | return drm_dev->driver->suspend(drm_dev, state); | |
47 | } | ||
43 | return 0; | 48 | return 0; |
44 | } | 49 | } |
45 | 50 | ||
46 | /** | 51 | /** |
47 | * drm_sysfs_resume - DRM class resume hook | 52 | * drm_class_resume - DRM class resume hook |
48 | * @dev: Linux device to resume | 53 | * @dev: Linux device to resume |
49 | * | 54 | * |
50 | * Just figures out what the actual struct drm_device associated with | 55 | * Just figures out what the actual struct drm_device associated with |
51 | * @dev is and calls its resume hook, if present. | 56 | * @dev is and calls its resume hook, if present. |
52 | */ | 57 | */ |
53 | static int drm_sysfs_resume(struct device *dev) | 58 | static int drm_class_resume(struct device *dev) |
54 | { | 59 | { |
55 | struct drm_minor *drm_minor = to_drm_minor(dev); | 60 | if (dev->type == &drm_sysfs_device_minor) { |
56 | struct drm_device *drm_dev = drm_minor->dev; | 61 | struct drm_minor *drm_minor = to_drm_minor(dev); |
57 | 62 | struct drm_device *drm_dev = drm_minor->dev; | |
58 | if (drm_minor->type == DRM_MINOR_LEGACY && | 63 | |
59 | !drm_core_check_feature(drm_dev, DRIVER_MODESET) && | 64 | if (drm_minor->type == DRM_MINOR_LEGACY && |
60 | drm_dev->driver->resume) | 65 | !drm_core_check_feature(drm_dev, DRIVER_MODESET) && |
61 | return drm_dev->driver->resume(drm_dev); | 66 | drm_dev->driver->resume) |
62 | 67 | return drm_dev->driver->resume(drm_dev); | |
68 | } | ||
63 | return 0; | 69 | return 0; |
64 | } | 70 | } |
65 | 71 | ||
@@ -99,8 +105,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name) | |||
99 | goto err_out; | 105 | goto err_out; |
100 | } | 106 | } |
101 | 107 | ||
102 | class->suspend = drm_sysfs_suspend; | 108 | class->suspend = drm_class_suspend; |
103 | class->resume = drm_sysfs_resume; | 109 | class->resume = drm_class_resume; |
104 | 110 | ||
105 | err = class_create_file(class, &class_attr_version); | 111 | err = class_create_file(class, &class_attr_version); |
106 | if (err) | 112 | if (err) |
@@ -480,6 +486,7 @@ int drm_sysfs_device_add(struct drm_minor *minor) | |||
480 | minor->kdev.class = drm_class; | 486 | minor->kdev.class = drm_class; |
481 | minor->kdev.release = drm_sysfs_device_release; | 487 | minor->kdev.release = drm_sysfs_device_release; |
482 | minor->kdev.devt = minor->device; | 488 | minor->kdev.devt = minor->device; |
489 | minor->kdev.type = &drm_sysfs_device_minor; | ||
483 | if (minor->type == DRM_MINOR_CONTROL) | 490 | if (minor->type == DRM_MINOR_CONTROL) |
484 | minor_str = "controlD%d"; | 491 | minor_str = "controlD%d"; |
485 | else if (minor->type == DRM_MINOR_RENDER) | 492 | else if (minor->type == DRM_MINOR_RENDER) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7537f57d8a87..5b4f87e55621 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -222,6 +222,7 @@ typedef struct drm_i915_private { | |||
222 | unsigned int edp_support:1; | 222 | unsigned int edp_support:1; |
223 | int lvds_ssc_freq; | 223 | int lvds_ssc_freq; |
224 | 224 | ||
225 | int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ | ||
225 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 226 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
226 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 227 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
227 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | 228 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
@@ -384,6 +385,9 @@ typedef struct drm_i915_private { | |||
384 | */ | 385 | */ |
385 | struct list_head inactive_list; | 386 | struct list_head inactive_list; |
386 | 387 | ||
388 | /** LRU list of objects with fence regs on them. */ | ||
389 | struct list_head fence_list; | ||
390 | |||
387 | /** | 391 | /** |
388 | * List of breadcrumbs associated with GPU requests currently | 392 | * List of breadcrumbs associated with GPU requests currently |
389 | * outstanding. | 393 | * outstanding. |
@@ -451,6 +455,9 @@ struct drm_i915_gem_object { | |||
451 | /** This object's place on the active/flushing/inactive lists */ | 455 | /** This object's place on the active/flushing/inactive lists */ |
452 | struct list_head list; | 456 | struct list_head list; |
453 | 457 | ||
458 | /** This object's place on the fenced object LRU */ | ||
459 | struct list_head fence_list; | ||
460 | |||
454 | /** | 461 | /** |
455 | * This is set if the object is on the active or flushing lists | 462 | * This is set if the object is on the active or flushing lists |
456 | * (has pending rendering), and is not set if it's on inactive (ready | 463 | * (has pending rendering), and is not set if it's on inactive (ready |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 140bee142fc2..80e5ba490dc2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -978,6 +978,7 @@ int | |||
978 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 978 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
979 | struct drm_file *file_priv) | 979 | struct drm_file *file_priv) |
980 | { | 980 | { |
981 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
981 | struct drm_i915_gem_set_domain *args = data; | 982 | struct drm_i915_gem_set_domain *args = data; |
982 | struct drm_gem_object *obj; | 983 | struct drm_gem_object *obj; |
983 | uint32_t read_domains = args->read_domains; | 984 | uint32_t read_domains = args->read_domains; |
@@ -1010,8 +1011,18 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1010 | obj, obj->size, read_domains, write_domain); | 1011 | obj, obj->size, read_domains, write_domain); |
1011 | #endif | 1012 | #endif |
1012 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1013 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
1014 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1015 | |||
1013 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 1016 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); |
1014 | 1017 | ||
1018 | /* Update the LRU on the fence for the CPU access that's | ||
1019 | * about to occur. | ||
1020 | */ | ||
1021 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | ||
1022 | list_move_tail(&obj_priv->fence_list, | ||
1023 | &dev_priv->mm.fence_list); | ||
1024 | } | ||
1025 | |||
1015 | /* Silently promote "you're not bound, there was nothing to do" | 1026 | /* Silently promote "you're not bound, there was nothing to do" |
1016 | * to success, since the client was just asking us to | 1027 | * to success, since the client was just asking us to |
1017 | * make sure everything was done. | 1028 | * make sure everything was done. |
@@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1155 | } | 1166 | } |
1156 | 1167 | ||
1157 | /* Need a new fence register? */ | 1168 | /* Need a new fence register? */ |
1158 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | 1169 | if (obj_priv->tiling_mode != I915_TILING_NONE) { |
1159 | obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1160 | ret = i915_gem_object_get_fence_reg(obj); | 1170 | ret = i915_gem_object_get_fence_reg(obj); |
1161 | if (ret) { | 1171 | if (ret) { |
1162 | mutex_unlock(&dev->struct_mutex); | 1172 | mutex_unlock(&dev->struct_mutex); |
@@ -2208,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2208 | struct drm_i915_gem_object *old_obj_priv = NULL; | 2218 | struct drm_i915_gem_object *old_obj_priv = NULL; |
2209 | int i, ret, avail; | 2219 | int i, ret, avail; |
2210 | 2220 | ||
2221 | /* Just update our place in the LRU if our fence is getting used. */ | ||
2222 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | ||
2223 | list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); | ||
2224 | return 0; | ||
2225 | } | ||
2226 | |||
2211 | switch (obj_priv->tiling_mode) { | 2227 | switch (obj_priv->tiling_mode) { |
2212 | case I915_TILING_NONE: | 2228 | case I915_TILING_NONE: |
2213 | WARN(1, "allocating a fence for non-tiled object?\n"); | 2229 | WARN(1, "allocating a fence for non-tiled object?\n"); |
@@ -2229,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2229 | } | 2245 | } |
2230 | 2246 | ||
2231 | /* First try to find a free reg */ | 2247 | /* First try to find a free reg */ |
2232 | try_again: | ||
2233 | avail = 0; | 2248 | avail = 0; |
2234 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | 2249 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { |
2235 | reg = &dev_priv->fence_regs[i]; | 2250 | reg = &dev_priv->fence_regs[i]; |
@@ -2243,63 +2258,62 @@ try_again: | |||
2243 | 2258 | ||
2244 | /* None available, try to steal one or wait for a user to finish */ | 2259 | /* None available, try to steal one or wait for a user to finish */ |
2245 | if (i == dev_priv->num_fence_regs) { | 2260 | if (i == dev_priv->num_fence_regs) { |
2246 | uint32_t seqno = dev_priv->mm.next_gem_seqno; | 2261 | struct drm_gem_object *old_obj = NULL; |
2247 | 2262 | ||
2248 | if (avail == 0) | 2263 | if (avail == 0) |
2249 | return -ENOSPC; | 2264 | return -ENOSPC; |
2250 | 2265 | ||
2251 | for (i = dev_priv->fence_reg_start; | 2266 | list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list, |
2252 | i < dev_priv->num_fence_regs; i++) { | 2267 | fence_list) { |
2253 | uint32_t this_seqno; | 2268 | old_obj = old_obj_priv->obj; |
2254 | |||
2255 | reg = &dev_priv->fence_regs[i]; | ||
2256 | old_obj_priv = reg->obj->driver_private; | ||
2257 | 2269 | ||
2258 | if (old_obj_priv->pin_count) | 2270 | if (old_obj_priv->pin_count) |
2259 | continue; | 2271 | continue; |
2260 | 2272 | ||
2273 | /* Take a reference, as otherwise the wait_rendering | ||
2274 | * below may cause the object to get freed out from | ||
2275 | * under us. | ||
2276 | */ | ||
2277 | drm_gem_object_reference(old_obj); | ||
2278 | |||
2261 | /* i915 uses fences for GPU access to tiled buffers */ | 2279 | /* i915 uses fences for GPU access to tiled buffers */ |
2262 | if (IS_I965G(dev) || !old_obj_priv->active) | 2280 | if (IS_I965G(dev) || !old_obj_priv->active) |
2263 | break; | 2281 | break; |
2264 | 2282 | ||
2265 | /* find the seqno of the first available fence */ | 2283 | /* This brings the object to the head of the LRU if it |
2266 | this_seqno = old_obj_priv->last_rendering_seqno; | 2284 | * had been written to. The only way this should |
2267 | if (this_seqno != 0 && | 2285 | * result in us waiting longer than the expected |
2268 | reg->obj->write_domain == 0 && | 2286 | * optimal amount of time is if there was a |
2269 | i915_seqno_passed(seqno, this_seqno)) | 2287 | * fence-using buffer later that was read-only. |
2270 | seqno = this_seqno; | 2288 | */ |
2271 | } | 2289 | i915_gem_object_flush_gpu_write_domain(old_obj); |
2272 | 2290 | ret = i915_gem_object_wait_rendering(old_obj); | |
2273 | /* | 2291 | if (ret != 0) { |
2274 | * Now things get ugly... we have to wait for one of the | 2292 | drm_gem_object_unreference(old_obj); |
2275 | * objects to finish before trying again. | 2293 | return ret; |
2276 | */ | ||
2277 | if (i == dev_priv->num_fence_regs) { | ||
2278 | if (seqno == dev_priv->mm.next_gem_seqno) { | ||
2279 | i915_gem_flush(dev, | ||
2280 | I915_GEM_GPU_DOMAINS, | ||
2281 | I915_GEM_GPU_DOMAINS); | ||
2282 | seqno = i915_add_request(dev, NULL, | ||
2283 | I915_GEM_GPU_DOMAINS); | ||
2284 | if (seqno == 0) | ||
2285 | return -ENOMEM; | ||
2286 | } | 2294 | } |
2287 | 2295 | ||
2288 | ret = i915_wait_request(dev, seqno); | 2296 | break; |
2289 | if (ret) | ||
2290 | return ret; | ||
2291 | goto try_again; | ||
2292 | } | 2297 | } |
2293 | 2298 | ||
2294 | /* | 2299 | /* |
2295 | * Zap this virtual mapping so we can set up a fence again | 2300 | * Zap this virtual mapping so we can set up a fence again |
2296 | * for this object next time we need it. | 2301 | * for this object next time we need it. |
2297 | */ | 2302 | */ |
2298 | i915_gem_release_mmap(reg->obj); | 2303 | i915_gem_release_mmap(old_obj); |
2304 | |||
2305 | i = old_obj_priv->fence_reg; | ||
2306 | reg = &dev_priv->fence_regs[i]; | ||
2307 | |||
2299 | old_obj_priv->fence_reg = I915_FENCE_REG_NONE; | 2308 | old_obj_priv->fence_reg = I915_FENCE_REG_NONE; |
2309 | list_del_init(&old_obj_priv->fence_list); | ||
2310 | |||
2311 | drm_gem_object_unreference(old_obj); | ||
2300 | } | 2312 | } |
2301 | 2313 | ||
2302 | obj_priv->fence_reg = i; | 2314 | obj_priv->fence_reg = i; |
2315 | list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); | ||
2316 | |||
2303 | reg->obj = obj; | 2317 | reg->obj = obj; |
2304 | 2318 | ||
2305 | if (IS_I965G(dev)) | 2319 | if (IS_I965G(dev)) |
@@ -2342,6 +2356,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2342 | 2356 | ||
2343 | dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; | 2357 | dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; |
2344 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 2358 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
2359 | list_del_init(&obj_priv->fence_list); | ||
2345 | } | 2360 | } |
2346 | 2361 | ||
2347 | /** | 2362 | /** |
@@ -3595,9 +3610,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
3595 | * Pre-965 chips need a fence register set up in order to | 3610 | * Pre-965 chips need a fence register set up in order to |
3596 | * properly handle tiled surfaces. | 3611 | * properly handle tiled surfaces. |
3597 | */ | 3612 | */ |
3598 | if (!IS_I965G(dev) && | 3613 | if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { |
3599 | obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
3600 | obj_priv->tiling_mode != I915_TILING_NONE) { | ||
3601 | ret = i915_gem_object_get_fence_reg(obj); | 3614 | ret = i915_gem_object_get_fence_reg(obj); |
3602 | if (ret != 0) { | 3615 | if (ret != 0) { |
3603 | if (ret != -EBUSY && ret != -ERESTARTSYS) | 3616 | if (ret != -EBUSY && ret != -ERESTARTSYS) |
@@ -3806,6 +3819,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
3806 | obj_priv->obj = obj; | 3819 | obj_priv->obj = obj; |
3807 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 3820 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
3808 | INIT_LIST_HEAD(&obj_priv->list); | 3821 | INIT_LIST_HEAD(&obj_priv->list); |
3822 | INIT_LIST_HEAD(&obj_priv->fence_list); | ||
3809 | 3823 | ||
3810 | return 0; | 3824 | return 0; |
3811 | } | 3825 | } |
@@ -4218,15 +4232,11 @@ int | |||
4218 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 4232 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
4219 | struct drm_file *file_priv) | 4233 | struct drm_file *file_priv) |
4220 | { | 4234 | { |
4221 | int ret; | ||
4222 | |||
4223 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 4235 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4224 | return 0; | 4236 | return 0; |
4225 | 4237 | ||
4226 | ret = i915_gem_idle(dev); | ||
4227 | drm_irq_uninstall(dev); | 4238 | drm_irq_uninstall(dev); |
4228 | 4239 | return i915_gem_idle(dev); | |
4229 | return ret; | ||
4230 | } | 4240 | } |
4231 | 4241 | ||
4232 | void | 4242 | void |
@@ -4253,6 +4263,7 @@ i915_gem_load(struct drm_device *dev) | |||
4253 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4263 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4254 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4264 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4255 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | 4265 | INIT_LIST_HEAD(&dev_priv->mm.request_list); |
4266 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | ||
4256 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 4267 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
4257 | i915_gem_retire_work_handler); | 4268 | i915_gem_retire_work_handler); |
4258 | dev_priv->mm.next_gem_seqno = 1; | 4269 | dev_priv->mm.next_gem_seqno = 1; |
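[Editor's note] The i915_gem.c changes above replace seqno-based fence stealing with an LRU: every object holding a fence sits on dev_priv->mm.fence_list, is moved to the tail with list_move_tail() whenever its fence is used, and the steal path walks from the head to find the least-recently-used, unpinned object. A stand-alone sketch of that discipline using the same <linux/list.h> primitives follows; the struct and function names are illustrative.

#include <linux/list.h>

/* Illustrative object with its place on a fence LRU, mirroring the
 * fence_list member added to drm_i915_gem_object above. */
struct demo_obj {
	struct list_head fence_list;
	int pin_count;
};

static LIST_HEAD(fence_lru);

/* "Touch": move to the tail, i.e. mark as most recently used. */
static void demo_touch(struct demo_obj *obj)
{
	list_move_tail(&obj->fence_list, &fence_lru);
}

/* Steal: the head of the list is the least recently used entry;
 * pinned objects are skipped, as the hunk above does. */
static struct demo_obj *demo_steal(void)
{
	struct demo_obj *obj;

	list_for_each_entry(obj, &fence_lru, fence_list) {
		if (obj->pin_count)
			continue;
		list_del_init(&obj->fence_list);
		return obj;
	}
	return NULL;
}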
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 300aee3296c2..f806fcc54e09 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -59,6 +59,16 @@ find_section(struct bdb_header *bdb, int section_id) | |||
59 | return NULL; | 59 | return NULL; |
60 | } | 60 | } |
61 | 61 | ||
62 | static u16 | ||
63 | get_blocksize(void *p) | ||
64 | { | ||
65 | u16 *block_ptr, block_size; | ||
66 | |||
67 | block_ptr = (u16 *)((char *)p - 2); | ||
68 | block_size = *block_ptr; | ||
69 | return block_size; | ||
70 | } | ||
71 | |||
62 | static void | 72 | static void |
63 | fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | 73 | fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, |
64 | struct lvds_dvo_timing *dvo_timing) | 74 | struct lvds_dvo_timing *dvo_timing) |
@@ -215,6 +225,41 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
215 | } | 225 | } |
216 | 226 | ||
217 | static void | 227 | static void |
228 | parse_general_definitions(struct drm_i915_private *dev_priv, | ||
229 | struct bdb_header *bdb) | ||
230 | { | ||
231 | struct bdb_general_definitions *general; | ||
232 | const int crt_bus_map_table[] = { | ||
233 | GPIOB, | ||
234 | GPIOA, | ||
235 | GPIOC, | ||
236 | GPIOD, | ||
237 | GPIOE, | ||
238 | GPIOF, | ||
239 | }; | ||
240 | |||
241 | /* Set sensible defaults in case we can't find the general block | ||
242 | or it is the wrong chipset */ | ||
243 | dev_priv->crt_ddc_bus = -1; | ||
244 | |||
245 | general = find_section(bdb, BDB_GENERAL_DEFINITIONS); | ||
246 | if (general) { | ||
247 | u16 block_size = get_blocksize(general); | ||
248 | if (block_size >= sizeof(*general)) { | ||
249 | int bus_pin = general->crt_ddc_gmbus_pin; | ||
250 | DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin); | ||
251 | if ((bus_pin >= 1) && (bus_pin <= 6)) { | ||
252 | dev_priv->crt_ddc_bus = | ||
253 | crt_bus_map_table[bus_pin-1]; | ||
254 | } | ||
255 | } else { | ||
256 | DRM_DEBUG("BDB_GD too small (%d). Invalid.\n", | ||
257 | block_size); | ||
258 | } | ||
259 | } | ||
260 | } | ||
261 | |||
262 | static void | ||
218 | parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | 263 | parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, |
219 | struct bdb_header *bdb) | 264 | struct bdb_header *bdb) |
220 | { | 265 | { |
@@ -222,7 +267,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
222 | struct bdb_general_definitions *p_defs; | 267 | struct bdb_general_definitions *p_defs; |
223 | struct child_device_config *p_child; | 268 | struct child_device_config *p_child; |
224 | int i, child_device_num, count; | 269 | int i, child_device_num, count; |
225 | u16 block_size, *block_ptr; | 270 | u16 block_size; |
226 | 271 | ||
227 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); | 272 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); |
228 | if (!p_defs) { | 273 | if (!p_defs) { |
@@ -240,8 +285,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
240 | return; | 285 | return; |
241 | } | 286 | } |
242 | /* get the block size of general definitions */ | 287 | /* get the block size of general definitions */ |
243 | block_ptr = (u16 *)((char *)p_defs - 2); | 288 | block_size = get_blocksize(p_defs); |
244 | block_size = *block_ptr; | ||
245 | /* get the number of child device */ | 289 | /* get the number of child device */ |
246 | child_device_num = (block_size - sizeof(*p_defs)) / | 290 | child_device_num = (block_size - sizeof(*p_defs)) / |
247 | sizeof(*p_child); | 291 | sizeof(*p_child); |
@@ -362,6 +406,7 @@ intel_init_bios(struct drm_device *dev) | |||
362 | 406 | ||
363 | /* Grab useful general definitions */ | 407 | /* Grab useful general definitions */ |
364 | parse_general_features(dev_priv, bdb); | 408 | parse_general_features(dev_priv, bdb); |
409 | parse_general_definitions(dev_priv, bdb); | ||
365 | parse_lfp_panel_data(dev_priv, bdb); | 410 | parse_lfp_panel_data(dev_priv, bdb); |
366 | parse_sdvo_panel_data(dev_priv, bdb); | 411 | parse_sdvo_panel_data(dev_priv, bdb); |
367 | parse_sdvo_device_mapping(dev_priv, bdb); | 412 | parse_sdvo_device_mapping(dev_priv, bdb); |
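[Editor's note] The new get_blocksize() helper above relies on the VBT/BDB layout in which each block is preceded by a 16-bit size field, so the size lives two bytes before the payload pointer returned by find_section(). A small stand-alone model of that layout follows; the sample buffer is fabricated, and memcpy is used here instead of the driver's direct u16 cast to sidestep alignment concerns (the real code reads the value in native byte order, which is little-endian on the hardware in question).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of the BDB block layout implied by get_blocksize():
 *   [1-byte id][2-byte size][size bytes of payload]
 * find_section() returns a pointer to the payload, so the size field
 * sits two bytes before it. */
int main(void)
{
	uint8_t bdb[] = { 0x02,             /* block id */
			  0x04, 0x00,       /* size = 4 */
			  0xaa, 0xbb, 0xcc, 0xdd };  /* payload */
	uint8_t *payload = bdb + 3;         /* what find_section() returns */
	uint16_t size;

	memcpy(&size, payload - 2, sizeof(size));  /* get_blocksize() */
	printf("block size = %u\n", size);         /* prints 4 */
	return 0;
}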
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4cf8e2e88a40..590f81c8f594 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -508,6 +508,7 @@ void intel_crt_init(struct drm_device *dev) | |||
508 | { | 508 | { |
509 | struct drm_connector *connector; | 509 | struct drm_connector *connector; |
510 | struct intel_output *intel_output; | 510 | struct intel_output *intel_output; |
511 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
511 | u32 i2c_reg; | 512 | u32 i2c_reg; |
512 | 513 | ||
513 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 514 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); |
@@ -527,8 +528,12 @@ void intel_crt_init(struct drm_device *dev) | |||
527 | /* Set up the DDC bus. */ | 528 | /* Set up the DDC bus. */ |
528 | if (IS_IGDNG(dev)) | 529 | if (IS_IGDNG(dev)) |
529 | i2c_reg = PCH_GPIOA; | 530 | i2c_reg = PCH_GPIOA; |
530 | else | 531 | else { |
531 | i2c_reg = GPIOA; | 532 | i2c_reg = GPIOA; |
533 | /* Use VBT information for CRT DDC if available */ | ||
534 | if (dev_priv->crt_ddc_bus != -1) | ||
535 | i2c_reg = dev_priv->crt_ddc_bus; | ||
536 | } | ||
532 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); | 537 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); |
533 | if (!intel_output->ddc_bus) { | 538 | if (!intel_output->ddc_bus) { |
534 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 539 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
@@ -537,6 +542,10 @@ void intel_crt_init(struct drm_device *dev) | |||
537 | } | 542 | } |
538 | 543 | ||
539 | intel_output->type = INTEL_OUTPUT_ANALOG; | 544 | intel_output->type = INTEL_OUTPUT_ANALOG; |
545 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
546 | (1 << INTEL_ANALOG_CLONE_BIT) | | ||
547 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
548 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | ||
540 | connector->interlace_allowed = 0; | 549 | connector->interlace_allowed = 0; |
541 | connector->doublescan_allowed = 0; | 550 | connector->doublescan_allowed = 0; |
542 | 551 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d6fce2133413..748ed50c55ca 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -666,7 +666,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
666 | intel_clock_t clock; | 666 | intel_clock_t clock; |
667 | int err = target; | 667 | int err = target; |
668 | 668 | ||
669 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 669 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
670 | (I915_READ(LVDS)) != 0) { | 670 | (I915_READ(LVDS)) != 0) { |
671 | /* | 671 | /* |
672 | * For LVDS, if the panel is on, just rely on its current | 672 | * For LVDS, if the panel is on, just rely on its current |
@@ -2005,7 +2005,21 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2005 | return; | 2005 | return; |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | const static int latency_ns = 3000; /* default for non-igd platforms */ | 2008 | /* |
2009 | * Latency for FIFO fetches is dependent on several factors: | ||
2010 | * - memory configuration (speed, channels) | ||
2011 | * - chipset | ||
2012 | * - current MCH state | ||
2013 | * It can be fairly high in some situations, so here we assume a fairly | ||
2014 | * pessimal value. It's a tradeoff between extra memory fetches (if we | ||
2015 | * set this value too high, the FIFO will fetch frequently to stay full) | ||
2016 | * and power consumption (set it too low to save power and we might see | ||
2017 | * FIFO underruns and display "flicker"). | ||
2018 | * | ||
2019 | * A value of 5us seems to be a good balance; safe for very low end | ||
2020 | * platforms but not overly aggressive on lower latency configs. | ||
2021 | */ | ||
2022 | const static int latency_ns = 5000; | ||
2009 | 2023 | ||
2010 | static int intel_get_fifo_size(struct drm_device *dev, int plane) | 2024 | static int intel_get_fifo_size(struct drm_device *dev, int plane) |
2011 | { | 2025 | { |
@@ -2396,7 +2410,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2396 | if (is_sdvo) { | 2410 | if (is_sdvo) { |
2397 | dpll |= DPLL_DVO_HIGH_SPEED; | 2411 | dpll |= DPLL_DVO_HIGH_SPEED; |
2398 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | 2412 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; |
2399 | if (IS_I945G(dev) || IS_I945GM(dev)) | 2413 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
2400 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | 2414 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; |
2401 | else if (IS_IGDNG(dev)) | 2415 | else if (IS_IGDNG(dev)) |
2402 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | 2416 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
@@ -3170,7 +3184,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) | |||
3170 | 3184 | ||
3171 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 3185 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
3172 | struct intel_output *intel_output = to_intel_output(connector); | 3186 | struct intel_output *intel_output = to_intel_output(connector); |
3173 | if (type_mask & (1 << intel_output->type)) | 3187 | if (type_mask & intel_output->clone_mask) |
3174 | index_mask |= (1 << entry); | 3188 | index_mask |= (1 << entry); |
3175 | entry++; | 3189 | entry++; |
3176 | } | 3190 | } |
@@ -3218,30 +3232,30 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
3218 | intel_dp_init(dev, PCH_DP_D); | 3232 | intel_dp_init(dev, PCH_DP_D); |
3219 | 3233 | ||
3220 | } else if (IS_I9XX(dev)) { | 3234 | } else if (IS_I9XX(dev)) { |
3221 | int found; | 3235 | bool found = false; |
3222 | u32 reg; | ||
3223 | 3236 | ||
3224 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 3237 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
3225 | found = intel_sdvo_init(dev, SDVOB); | 3238 | found = intel_sdvo_init(dev, SDVOB); |
3226 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 3239 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
3227 | intel_hdmi_init(dev, SDVOB); | 3240 | intel_hdmi_init(dev, SDVOB); |
3241 | |||
3228 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 3242 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) |
3229 | intel_dp_init(dev, DP_B); | 3243 | intel_dp_init(dev, DP_B); |
3230 | } | 3244 | } |
3231 | 3245 | ||
3232 | /* Before G4X SDVOC doesn't have its own detect register */ | 3246 | /* Before G4X SDVOC doesn't have its own detect register */ |
3233 | if (IS_G4X(dev)) | ||
3234 | reg = SDVOC; | ||
3235 | else | ||
3236 | reg = SDVOB; | ||
3237 | 3247 | ||
3238 | if (I915_READ(reg) & SDVO_DETECTED) { | 3248 | if (I915_READ(SDVOB) & SDVO_DETECTED) |
3239 | found = intel_sdvo_init(dev, SDVOC); | 3249 | found = intel_sdvo_init(dev, SDVOC); |
3240 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 3250 | |
3251 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | ||
3252 | |||
3253 | if (SUPPORTS_INTEGRATED_HDMI(dev)) | ||
3241 | intel_hdmi_init(dev, SDVOC); | 3254 | intel_hdmi_init(dev, SDVOC); |
3242 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 3255 | if (SUPPORTS_INTEGRATED_DP(dev)) |
3243 | intel_dp_init(dev, DP_C); | 3256 | intel_dp_init(dev, DP_C); |
3244 | } | 3257 | } |
3258 | |||
3245 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | 3259 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) |
3246 | intel_dp_init(dev, DP_D); | 3260 | intel_dp_init(dev, DP_D); |
3247 | } else | 3261 | } else |
@@ -3253,51 +3267,10 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
3253 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 3267 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
3254 | struct intel_output *intel_output = to_intel_output(connector); | 3268 | struct intel_output *intel_output = to_intel_output(connector); |
3255 | struct drm_encoder *encoder = &intel_output->enc; | 3269 | struct drm_encoder *encoder = &intel_output->enc; |
3256 | int crtc_mask = 0, clone_mask = 0; | ||
3257 | 3270 | ||
3258 | /* valid crtcs */ | 3271 | encoder->possible_crtcs = intel_output->crtc_mask; |
3259 | switch(intel_output->type) { | 3272 | encoder->possible_clones = intel_connector_clones(dev, |
3260 | case INTEL_OUTPUT_HDMI: | 3273 | intel_output->clone_mask); |
3261 | crtc_mask = ((1 << 0)| | ||
3262 | (1 << 1)); | ||
3263 | clone_mask = ((1 << INTEL_OUTPUT_HDMI)); | ||
3264 | break; | ||
3265 | case INTEL_OUTPUT_DVO: | ||
3266 | case INTEL_OUTPUT_SDVO: | ||
3267 | crtc_mask = ((1 << 0)| | ||
3268 | (1 << 1)); | ||
3269 | clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | | ||
3270 | (1 << INTEL_OUTPUT_DVO) | | ||
3271 | (1 << INTEL_OUTPUT_SDVO)); | ||
3272 | break; | ||
3273 | case INTEL_OUTPUT_ANALOG: | ||
3274 | crtc_mask = ((1 << 0)| | ||
3275 | (1 << 1)); | ||
3276 | clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | | ||
3277 | (1 << INTEL_OUTPUT_DVO) | | ||
3278 | (1 << INTEL_OUTPUT_SDVO)); | ||
3279 | break; | ||
3280 | case INTEL_OUTPUT_LVDS: | ||
3281 | crtc_mask = (1 << 1); | ||
3282 | clone_mask = (1 << INTEL_OUTPUT_LVDS); | ||
3283 | break; | ||
3284 | case INTEL_OUTPUT_TVOUT: | ||
3285 | crtc_mask = ((1 << 0) | | ||
3286 | (1 << 1)); | ||
3287 | clone_mask = (1 << INTEL_OUTPUT_TVOUT); | ||
3288 | break; | ||
3289 | case INTEL_OUTPUT_DISPLAYPORT: | ||
3290 | crtc_mask = ((1 << 0) | | ||
3291 | (1 << 1)); | ||
3292 | clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); | ||
3293 | break; | ||
3294 | case INTEL_OUTPUT_EDP: | ||
3295 | crtc_mask = (1 << 1); | ||
3296 | clone_mask = (1 << INTEL_OUTPUT_EDP); | ||
3297 | break; | ||
3298 | } | ||
3299 | encoder->possible_crtcs = crtc_mask; | ||
3300 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); | ||
3301 | } | 3274 | } |
3302 | } | 3275 | } |
3303 | 3276 | ||
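[Editor's note] The long comment added above explains why latency_ns moves from 3000 to 5000: the display FIFO must hold enough scanout data to ride out a worst-case memory fetch, trading extra fetch traffic against underrun flicker. A back-of-the-envelope check of what 5 us means at a common pixel clock; the clock and pixel size below are illustrative, not from the patch.

#include <stdio.h>

int main(void)
{
	long long clock_khz = 148500;	/* example: 1080p60 pixel clock */
	long long bpp = 4;		/* example: 32-bit pixels */
	long long latency_ns = 5000;	/* value chosen in the hunk */

	/* Bytes the display must source from its FIFO while a memory
	 * fetch is outstanding: clock * bytes/pixel * latency. */
	long long bytes = clock_khz * bpp * latency_ns / 1000000;

	printf("FIFO must cover ~%lld bytes of scanout\n", bytes); /* 2970 */
	return 0;
}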
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index a6ff15ac548a..2b914d732076 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1254,6 +1254,18 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1254 | else | 1254 | else |
1255 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | 1255 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; |
1256 | 1256 | ||
1257 | if (output_reg == DP_B) | ||
1258 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | ||
1259 | else if (output_reg == DP_C) | ||
1260 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | ||
1261 | else if (output_reg == DP_D) | ||
1262 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | ||
1263 | |||
1264 | if (IS_eDP(intel_output)) { | ||
1265 | intel_output->crtc_mask = (1 << 1); | ||
1266 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | ||
1267 | } else | ||
1268 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | ||
1257 | connector->interlace_allowed = true; | 1269 | connector->interlace_allowed = true; |
1258 | connector->doublescan_allowed = 0; | 1270 | connector->doublescan_allowed = 0; |
1259 | 1271 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d6f92ea1b553..26a6227c15fe 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -57,6 +57,25 @@ | |||
57 | #define INTEL_OUTPUT_DISPLAYPORT 7 | 57 | #define INTEL_OUTPUT_DISPLAYPORT 7 |
58 | #define INTEL_OUTPUT_EDP 8 | 58 | #define INTEL_OUTPUT_EDP 8 |
59 | 59 | ||
60 | /* Intel Pipe Clone Bit */ | ||
61 | #define INTEL_HDMIB_CLONE_BIT 1 | ||
62 | #define INTEL_HDMIC_CLONE_BIT 2 | ||
63 | #define INTEL_HDMID_CLONE_BIT 3 | ||
64 | #define INTEL_HDMIE_CLONE_BIT 4 | ||
65 | #define INTEL_HDMIF_CLONE_BIT 5 | ||
66 | #define INTEL_SDVO_NON_TV_CLONE_BIT 6 | ||
67 | #define INTEL_SDVO_TV_CLONE_BIT 7 | ||
68 | #define INTEL_SDVO_LVDS_CLONE_BIT 8 | ||
69 | #define INTEL_ANALOG_CLONE_BIT 9 | ||
70 | #define INTEL_TV_CLONE_BIT 10 | ||
71 | #define INTEL_DP_B_CLONE_BIT 11 | ||
72 | #define INTEL_DP_C_CLONE_BIT 12 | ||
73 | #define INTEL_DP_D_CLONE_BIT 13 | ||
74 | #define INTEL_LVDS_CLONE_BIT 14 | ||
75 | #define INTEL_DVO_TMDS_CLONE_BIT 15 | ||
76 | #define INTEL_DVO_LVDS_CLONE_BIT 16 | ||
77 | #define INTEL_EDP_CLONE_BIT 17 | ||
78 | |||
60 | #define INTEL_DVO_CHIP_NONE 0 | 79 | #define INTEL_DVO_CHIP_NONE 0 |
61 | #define INTEL_DVO_CHIP_LVDS 1 | 80 | #define INTEL_DVO_CHIP_LVDS 1 |
62 | #define INTEL_DVO_CHIP_TMDS 2 | 81 | #define INTEL_DVO_CHIP_TMDS 2 |
@@ -86,6 +105,8 @@ struct intel_output { | |||
86 | bool needs_tv_clock; | 105 | bool needs_tv_clock; |
87 | void *dev_priv; | 106 | void *dev_priv; |
88 | void (*hot_plug)(struct intel_output *); | 107 | void (*hot_plug)(struct intel_output *); |
108 | int crtc_mask; | ||
109 | int clone_mask; | ||
89 | }; | 110 | }; |
90 | 111 | ||
91 | struct intel_crtc { | 112 | struct intel_crtc { |
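[Editor's note] The INTEL_*_CLONE_BIT defines above replace the old per-output-type clone masks: each output now carries a clone_mask of these bits, and intel_connector_clones() in the intel_display.c hunk builds encoder->possible_clones by testing every connector's clone_mask against the mask passed in. A stand-alone model of that computation follows; the function and struct-free harness are illustrative, while the three example masks mirror the CRT, HDMI-B and LVDS values set in the hunks.

#include <stdio.h>

/* Model of intel_connector_clones(): connector i is a possible clone
 * of the caller iff the caller's clone mask overlaps connector i's
 * clone_mask; the result is an index bitmask over the connector list. */
static unsigned int connector_clones(const unsigned int *clone_masks,
				     int count, unsigned int type_mask)
{
	unsigned int index_mask = 0;
	int i;

	for (i = 0; i < count; i++)
		if (type_mask & clone_masks[i])
			index_mask |= 1u << i;
	return index_mask;
}

int main(void)
{
	unsigned int crt  = (1u << 6) | (1u << 9) | (1u << 8); /* CRT   */
	unsigned int hdmi = 1u << 1;                           /* HDMI-B */
	unsigned int lvds = 1u << 14;                          /* LVDS  */
	unsigned int masks[] = { crt, hdmi, lvds };

	/* possible_clones for the CRT connector: only itself (bit 0). */
	printf("0x%x\n", connector_clones(masks, 3, crt));
	return 0;
}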
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 13bff20930e8..a4d2606de778 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -435,14 +435,20 @@ void intel_dvo_init(struct drm_device *dev) | |||
435 | continue; | 435 | continue; |
436 | 436 | ||
437 | intel_output->type = INTEL_OUTPUT_DVO; | 437 | intel_output->type = INTEL_OUTPUT_DVO; |
438 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | ||
438 | switch (dvo->type) { | 439 | switch (dvo->type) { |
439 | case INTEL_DVO_CHIP_TMDS: | 440 | case INTEL_DVO_CHIP_TMDS: |
441 | intel_output->clone_mask = | ||
442 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | | ||
443 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
440 | drm_connector_init(dev, connector, | 444 | drm_connector_init(dev, connector, |
441 | &intel_dvo_connector_funcs, | 445 | &intel_dvo_connector_funcs, |
442 | DRM_MODE_CONNECTOR_DVII); | 446 | DRM_MODE_CONNECTOR_DVII); |
443 | encoder_type = DRM_MODE_ENCODER_TMDS; | 447 | encoder_type = DRM_MODE_ENCODER_TMDS; |
444 | break; | 448 | break; |
445 | case INTEL_DVO_CHIP_LVDS: | 449 | case INTEL_DVO_CHIP_LVDS: |
450 | intel_output->clone_mask = | ||
451 | (1 << INTEL_DVO_LVDS_CLONE_BIT); | ||
446 | drm_connector_init(dev, connector, | 452 | drm_connector_init(dev, connector, |
447 | &intel_dvo_connector_funcs, | 453 | &intel_dvo_connector_funcs, |
448 | DRM_MODE_CONNECTOR_LVDS); | 454 | DRM_MODE_CONNECTOR_LVDS); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1842290cded3..fa304e136010 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -230,22 +230,28 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
230 | 230 | ||
231 | connector->interlace_allowed = 0; | 231 | connector->interlace_allowed = 0; |
232 | connector->doublescan_allowed = 0; | 232 | connector->doublescan_allowed = 0; |
233 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | ||
233 | 234 | ||
234 | /* Set up the DDC bus. */ | 235 | /* Set up the DDC bus. */ |
235 | if (sdvox_reg == SDVOB) | 236 | if (sdvox_reg == SDVOB) { |
237 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | ||
236 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 238 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
237 | else if (sdvox_reg == SDVOC) | 239 | } else if (sdvox_reg == SDVOC) { |
240 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | ||
238 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 241 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
239 | else if (sdvox_reg == HDMIB) | 242 | } else if (sdvox_reg == HDMIB) { |
243 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | ||
240 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 244 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
241 | "HDMIB"); | 245 | "HDMIB"); |
242 | else if (sdvox_reg == HDMIC) | 246 | } else if (sdvox_reg == HDMIC) { |
247 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | ||
243 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 248 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
244 | "HDMIC"); | 249 | "HDMIC"); |
245 | else if (sdvox_reg == HDMID) | 250 | } else if (sdvox_reg == HDMID) { |
251 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | ||
246 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 252 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
247 | "HDMID"); | 253 | "HDMID"); |
248 | 254 | } | |
249 | if (!intel_output->ddc_bus) | 255 | if (!intel_output->ddc_bus) |
250 | goto err_connector; | 256 | goto err_connector; |
251 | 257 | ||
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 3f445a80c552..8df02ef89261 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -916,6 +916,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
916 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 916 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
917 | intel_output->type = INTEL_OUTPUT_LVDS; | 917 | intel_output->type = INTEL_OUTPUT_LVDS; |
918 | 918 | ||
919 | intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | ||
920 | intel_output->crtc_mask = (1 << 1); | ||
919 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 921 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
920 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 922 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
921 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 923 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 5371d9332554..d3b74ba62b4a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1458,7 +1458,7 @@ intel_sdvo_multifunc_encoder(struct intel_output *intel_output) | |||
1458 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) | 1458 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) |
1459 | caps++; | 1459 | caps++; |
1460 | if (sdvo_priv->caps.output_flags & | 1460 | if (sdvo_priv->caps.output_flags & |
1461 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID0)) | 1461 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) |
1462 | caps++; | 1462 | caps++; |
1463 | if (sdvo_priv->caps.output_flags & | 1463 | if (sdvo_priv->caps.output_flags & |
1464 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) | 1464 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) |
@@ -1967,6 +1967,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
1967 | intel_sdvo_set_colorimetry(intel_output, | 1967 | intel_sdvo_set_colorimetry(intel_output, |
1968 | SDVO_COLORIMETRY_RGB256); | 1968 | SDVO_COLORIMETRY_RGB256); |
1969 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 1969 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
1970 | intel_output->clone_mask = | ||
1971 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
1972 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
1970 | } | 1973 | } |
1971 | } else if (flags & SDVO_OUTPUT_SVID0) { | 1974 | } else if (flags & SDVO_OUTPUT_SVID0) { |
1972 | 1975 | ||
@@ -1975,11 +1978,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
1975 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 1978 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
1976 | sdvo_priv->is_tv = true; | 1979 | sdvo_priv->is_tv = true; |
1977 | intel_output->needs_tv_clock = true; | 1980 | intel_output->needs_tv_clock = true; |
1981 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
1978 | } else if (flags & SDVO_OUTPUT_RGB0) { | 1982 | } else if (flags & SDVO_OUTPUT_RGB0) { |
1979 | 1983 | ||
1980 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | 1984 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; |
1981 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 1985 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
1982 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 1986 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
1987 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
1988 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
1983 | } else if (flags & SDVO_OUTPUT_RGB1) { | 1989 | } else if (flags & SDVO_OUTPUT_RGB1) { |
1984 | 1990 | ||
1985 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | 1991 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
@@ -1991,12 +1997,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
1991 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 1997 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
1992 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 1998 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
1993 | sdvo_priv->is_lvds = true; | 1999 | sdvo_priv->is_lvds = true; |
2000 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | ||
2001 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
1994 | } else if (flags & SDVO_OUTPUT_LVDS1) { | 2002 | } else if (flags & SDVO_OUTPUT_LVDS1) { |
1995 | 2003 | ||
1996 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | 2004 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; |
1997 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2005 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
1998 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2006 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
1999 | sdvo_priv->is_lvds = true; | 2007 | sdvo_priv->is_lvds = true; |
2008 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | ||
2009 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
2000 | } else { | 2010 | } else { |
2001 | 2011 | ||
2002 | unsigned char bytes[2]; | 2012 | unsigned char bytes[2]; |
@@ -2009,6 +2019,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2009 | bytes[0], bytes[1]); | 2019 | bytes[0], bytes[1]); |
2010 | ret = false; | 2020 | ret = false; |
2011 | } | 2021 | } |
2022 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | ||
2012 | 2023 | ||
2013 | if (ret && registered) | 2024 | if (ret && registered) |
2014 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | 2025 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index da4ab4dc1630..5b1c9e9fdba0 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1718,6 +1718,7 @@ intel_tv_init(struct drm_device *dev) | |||
1718 | if (!intel_output) { | 1718 | if (!intel_output) { |
1719 | return; | 1719 | return; |
1720 | } | 1720 | } |
1721 | |||
1721 | connector = &intel_output->base; | 1722 | connector = &intel_output->base; |
1722 | 1723 | ||
1723 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1724 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
@@ -1729,6 +1730,8 @@ intel_tv_init(struct drm_device *dev) | |||
1729 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1730 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
1730 | tv_priv = (struct intel_tv_priv *)(intel_output + 1); | 1731 | tv_priv = (struct intel_tv_priv *)(intel_output + 1); |
1731 | intel_output->type = INTEL_OUTPUT_TVOUT; | 1732 | intel_output->type = INTEL_OUTPUT_TVOUT; |
1733 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | ||
1734 | intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); | ||
1732 | intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | 1735 | intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); |
1733 | intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1736 | intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
1734 | intel_output->dev_priv = tv_priv; | 1737 | intel_output->dev_priv = tv_priv; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f1ba8ff41130..68e728e8be4d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -254,6 +254,72 @@ void r100_mc_fini(struct radeon_device *rdev) | |||
254 | 254 | ||
255 | 255 | ||
256 | /* | 256 | /* |
257 | * Interrupts | ||
258 | */ | ||
259 | int r100_irq_set(struct radeon_device *rdev) | ||
260 | { | ||
261 | uint32_t tmp = 0; | ||
262 | |||
263 | if (rdev->irq.sw_int) { | ||
264 | tmp |= RADEON_SW_INT_ENABLE; | ||
265 | } | ||
266 | if (rdev->irq.crtc_vblank_int[0]) { | ||
267 | tmp |= RADEON_CRTC_VBLANK_MASK; | ||
268 | } | ||
269 | if (rdev->irq.crtc_vblank_int[1]) { | ||
270 | tmp |= RADEON_CRTC2_VBLANK_MASK; | ||
271 | } | ||
272 | WREG32(RADEON_GEN_INT_CNTL, tmp); | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | ||
277 | { | ||
278 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | ||
279 | uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | | ||
280 | RADEON_CRTC2_VBLANK_STAT; | ||
281 | |||
282 | if (irqs) { | ||
283 | WREG32(RADEON_GEN_INT_STATUS, irqs); | ||
284 | } | ||
285 | return irqs & irq_mask; | ||
286 | } | ||
287 | |||
288 | int r100_irq_process(struct radeon_device *rdev) | ||
289 | { | ||
290 | uint32_t status; | ||
291 | |||
292 | status = r100_irq_ack(rdev); | ||
293 | if (!status) { | ||
294 | return IRQ_NONE; | ||
295 | } | ||
296 | while (status) { | ||
297 | /* SW interrupt */ | ||
298 | if (status & RADEON_SW_INT_TEST) { | ||
299 | radeon_fence_process(rdev); | ||
300 | } | ||
301 | /* Vertical blank interrupts */ | ||
302 | if (status & RADEON_CRTC_VBLANK_STAT) { | ||
303 | drm_handle_vblank(rdev->ddev, 0); | ||
304 | } | ||
305 | if (status & RADEON_CRTC2_VBLANK_STAT) { | ||
306 | drm_handle_vblank(rdev->ddev, 1); | ||
307 | } | ||
308 | status = r100_irq_ack(rdev); | ||
309 | } | ||
310 | return IRQ_HANDLED; | ||
311 | } | ||
312 | |||
313 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | ||
314 | { | ||
315 | if (crtc == 0) | ||
316 | return RREG32(RADEON_CRTC_CRNT_FRAME); | ||
317 | else | ||
318 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | ||
319 | } | ||
320 | |||
321 | |||
322 | /* | ||
257 | * Fence emission | 323 | * Fence emission |
258 | */ | 324 | */ |
259 | void r100_fence_ring_emit(struct radeon_device *rdev, | 325 | void r100_fence_ring_emit(struct radeon_device *rdev, |
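r100_irq_process() above follows an ack-and-drain pattern: read GEN_INT_STATUS, write the value back to acknowledge it, handle the sources that are both pending and of interest, and loop until nothing remains. The following is a small user-space sketch of that loop with the register accesses stubbed by a fake status latch; the bit values are placeholders, not the real RADEON_* constants.

#include <stdio.h>
#include <stdint.h>

#define SW_INT_TEST   (1u << 25)   /* placeholder bit positions */
#define CRTC_VBLANK   (1u << 0)
#define CRTC2_VBLANK  (1u << 9)

static uint32_t fake_status = SW_INT_TEST | CRTC_VBLANK;   /* pretend hardware latch */

static uint32_t irq_ack(void)
{
	uint32_t irqs = fake_status;        /* RREG32(GEN_INT_STATUS) */
	if (irqs)
		fake_status = 0;            /* writing the value back clears the latch */
	return irqs & (SW_INT_TEST | CRTC_VBLANK | CRTC2_VBLANK);
}

int main(void)
{
	uint32_t status = irq_ack();

	if (!status) {
		puts("IRQ_NONE");
		return 0;
	}
	while (status) {                    /* drain until no handled source is pending */
		if (status & SW_INT_TEST)
			puts("fence interrupt");
		if (status & CRTC_VBLANK)
			puts("vblank on crtc 0");
		if (status & CRTC2_VBLANK)
			puts("vblank on crtc 1");
		status = irq_ack();
	}
	puts("IRQ_HANDLED");
	return 0;
}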
@@ -1025,6 +1091,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1025 | tmp |= tile_flags; | 1091 | tmp |= tile_flags; |
1026 | ib[idx] = tmp; | 1092 | ib[idx] = tmp; |
1027 | break; | 1093 | break; |
1094 | case RADEON_RB3D_ZPASS_ADDR: | ||
1095 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1096 | if (r) { | ||
1097 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1098 | idx, reg); | ||
1099 | r100_cs_dump_packet(p, pkt); | ||
1100 | return r; | ||
1101 | } | ||
1102 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | ||
1103 | break; | ||
1028 | default: | 1104 | default: |
1029 | /* FIXME: we don't want to allow any other packets */ | 1105 | /* FIXME: we don't want to allow any other packets */ |
1030 | break; | 1106 | break; |
@@ -1556,26 +1632,6 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
1556 | r100_pll_errata_after_data(rdev); | 1632 | r100_pll_errata_after_data(rdev); |
1557 | } | 1633 | } |
1558 | 1634 | ||
1559 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | ||
1560 | { | ||
1561 | if (reg < 0x10000) | ||
1562 | return readl(((void __iomem *)rdev->rmmio) + reg); | ||
1563 | else { | ||
1564 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | ||
1565 | return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | ||
1566 | } | ||
1567 | } | ||
1568 | |||
1569 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
1570 | { | ||
1571 | if (reg < 0x10000) | ||
1572 | writel(v, ((void __iomem *)rdev->rmmio) + reg); | ||
1573 | else { | ||
1574 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | ||
1575 | writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | ||
1576 | } | ||
1577 | } | ||
1578 | |||
1579 | int r100_init(struct radeon_device *rdev) | 1635 | int r100_init(struct radeon_device *rdev) |
1580 | { | 1636 | { |
1581 | return 0; | 1637 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 9c8d41534a5d..051bca6e3a4f 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -83,8 +83,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
83 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); | 83 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); |
84 | (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 84 | (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
85 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); | 85 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
86 | mb(); | ||
87 | } | 86 | } |
87 | mb(); | ||
88 | } | 88 | } |
89 | 89 | ||
90 | int rv370_pcie_gart_enable(struct radeon_device *rdev) | 90 | int rv370_pcie_gart_enable(struct radeon_device *rdev) |
@@ -448,6 +448,7 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
448 | /* rv350,rv370,rv380 */ | 448 | /* rv350,rv370,rv380 */ |
449 | rdev->num_gb_pipes = 1; | 449 | rdev->num_gb_pipes = 1; |
450 | } | 450 | } |
451 | rdev->num_z_pipes = 1; | ||
451 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); | 452 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); |
452 | switch (rdev->num_gb_pipes) { | 453 | switch (rdev->num_gb_pipes) { |
453 | case 2: | 454 | case 2: |
@@ -486,7 +487,8 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
486 | printk(KERN_WARNING "Failed to wait MC idle while " | 487 | printk(KERN_WARNING "Failed to wait MC idle while " |
487 | "programming pipes. Bad things might happen.\n"); | 488 | "programming pipes. Bad things might happen.\n"); |
488 | } | 489 | } |
489 | DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); | 490 | DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n", |
491 | rdev->num_gb_pipes, rdev->num_z_pipes); | ||
490 | } | 492 | } |
491 | 493 | ||
492 | int r300_ga_reset(struct radeon_device *rdev) | 494 | int r300_ga_reset(struct radeon_device *rdev) |
@@ -593,27 +595,6 @@ void r300_vram_info(struct radeon_device *rdev) | |||
593 | 595 | ||
594 | 596 | ||
595 | /* | 597 | /* |
596 | * Indirect registers accessor | ||
597 | */ | ||
598 | uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | ||
599 | { | ||
600 | uint32_t r; | ||
601 | |||
602 | WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff)); | ||
603 | (void)RREG32(RADEON_PCIE_INDEX); | ||
604 | r = RREG32(RADEON_PCIE_DATA); | ||
605 | return r; | ||
606 | } | ||
607 | |||
608 | void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
609 | { | ||
610 | WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff)); | ||
611 | (void)RREG32(RADEON_PCIE_INDEX); | ||
612 | WREG32(RADEON_PCIE_DATA, (v)); | ||
613 | (void)RREG32(RADEON_PCIE_DATA); | ||
614 | } | ||
615 | |||
616 | /* | ||
617 | * PCIE Lanes | 598 | * PCIE Lanes |
618 | */ | 599 | */ |
619 | 600 | ||
@@ -1014,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = { | |||
1014 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 995 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
1015 | 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, | 996 | 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, |
1016 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 997 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
1017 | 0x0003FC01, 0xFFFFFFF8, 0xFE800B19, | 998 | 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, |
1018 | }; | 999 | }; |
1019 | 1000 | ||
1020 | static int r300_packet0_check(struct radeon_cs_parser *p, | 1001 | static int r300_packet0_check(struct radeon_cs_parser *p, |
@@ -1403,6 +1384,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1403 | tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; | 1384 | tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; |
1404 | track->textures[i].txdepth = tmp; | 1385 | track->textures[i].txdepth = tmp; |
1405 | break; | 1386 | break; |
1387 | case R300_ZB_ZPASS_ADDR: | ||
1388 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1389 | if (r) { | ||
1390 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1391 | idx, reg); | ||
1392 | r100_cs_dump_packet(p, pkt); | ||
1393 | return r; | ||
1394 | } | ||
1395 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | ||
1396 | break; | ||
1397 | case 0x4be8: | ||
1398 | /* valid register only on RV530 */ | ||
1399 | if (p->rdev->family == CHIP_RV530) | ||
1400 | break; | ||
1401 | /* fallthrough do not move */ | ||
1406 | default: | 1402 | default: |
1407 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1403 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
1408 | reg, idx); | 1404 | reg, idx); |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index dea497a979f2..97426a6f370f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -165,7 +165,18 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
165 | printk(KERN_WARNING "Failed to wait GUI idle while " | 165 | printk(KERN_WARNING "Failed to wait GUI idle while " |
166 | "programming pipes. Bad things might happen.\n"); | 166 | "programming pipes. Bad things might happen.\n"); |
167 | } | 167 | } |
168 | DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); | 168 | |
169 | if (rdev->family == CHIP_RV530) { | ||
170 | tmp = RREG32(RV530_GB_PIPE_SELECT2); | ||
171 | if ((tmp & 3) == 3) | ||
172 | rdev->num_z_pipes = 2; | ||
173 | else | ||
174 | rdev->num_z_pipes = 1; | ||
175 | } else | ||
176 | rdev->num_z_pipes = 1; | ||
177 | |||
178 | DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", | ||
179 | rdev->num_gb_pipes, rdev->num_z_pipes); | ||
169 | } | 180 | } |
170 | 181 | ||
171 | void r420_gpu_init(struct radeon_device *rdev) | 182 | void r420_gpu_init(struct radeon_device *rdev) |
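r420_pipes_init() now derives num_z_pipes from RV530_GB_PIPE_SELECT2: on RV530 parts, if the low two bits are both set the chip reports two Z pipes, otherwise one; every other family is treated as having a single Z pipe. A tiny sketch of that decode, with the register value passed in instead of read from hardware:

#include <stdio.h>
#include <stdint.h>

/* Decode the Z-pipe count from a GB_PIPE_SELECT2-style value (RV530 only);
 * other families are assumed to have a single Z pipe. */
static int z_pipes_from_select2(uint32_t pipe_select2, int is_rv530)
{
	if (!is_rv530)
		return 1;
	return ((pipe_select2 & 3) == 3) ? 2 : 1;
}

int main(void)
{
	printf("RV530, select2=0x3 -> %d z pipes\n", z_pipes_from_select2(0x3, 1));
	printf("RV530, select2=0x1 -> %d z pipes\n", z_pipes_from_select2(0x1, 1));
	printf("non-RV530          -> %d z pipes\n", z_pipes_from_select2(0x3, 0));
	return 0;
}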
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 036691b38cb7..e1d5e0331e19 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -350,6 +350,7 @@ | |||
350 | #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 | 350 | #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 |
351 | #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 | 351 | #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 |
352 | #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c | 352 | #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c |
353 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 | ||
353 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 | 354 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
354 | 355 | ||
355 | /* master controls */ | 356 | /* master controls */ |
@@ -438,14 +439,15 @@ | |||
438 | # define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4 | 439 | # define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4 |
439 | # define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff | 440 | # define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff |
440 | 441 | ||
441 | #define R500_DxMODE_INT_MASK 0x6540 | ||
442 | #define R500_D1MODE_INT_MASK (1<<0) | ||
443 | #define R500_D2MODE_INT_MASK (1<<8) | ||
444 | |||
445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 | 442 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 |
446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) | 443 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) |
447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C | 444 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C |
445 | #define AVIVO_D1MODE_VBLANK_STATUS 0x6534 | ||
446 | # define AVIVO_VBLANK_ACK (1 << 4) | ||
448 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 | 447 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 |
448 | #define AVIVO_DxMODE_INT_MASK 0x6540 | ||
449 | # define AVIVO_D1MODE_INT_MASK (1 << 0) | ||
450 | # define AVIVO_D2MODE_INT_MASK (1 << 8) | ||
449 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 | 451 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 |
450 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 | 452 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 |
451 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 | 453 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 |
@@ -475,6 +477,7 @@ | |||
475 | #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 | 477 | #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 |
476 | #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 | 478 | #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 |
477 | #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c | 479 | #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c |
480 | #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 | ||
478 | #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 | 481 | #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 |
479 | 482 | ||
480 | #define AVIVO_D2GRPH_ENABLE 0x6900 | 483 | #define AVIVO_D2GRPH_ENABLE 0x6900 |
@@ -497,6 +500,7 @@ | |||
497 | #define AVIVO_D2CUR_SIZE 0x6c10 | 500 | #define AVIVO_D2CUR_SIZE 0x6c10 |
498 | #define AVIVO_D2CUR_POSITION 0x6c14 | 501 | #define AVIVO_D2CUR_POSITION 0x6c14 |
499 | 502 | ||
503 | #define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 | ||
500 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 | 504 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 |
501 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 | 505 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 |
502 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 | 506 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 |
@@ -748,4 +752,8 @@ | |||
748 | # define AVIVO_I2C_EN (1 << 0) | 752 | # define AVIVO_I2C_EN (1 << 0) |
749 | # define AVIVO_I2C_RESET (1 << 8) | 753 | # define AVIVO_I2C_RESET (1 << 8) |
750 | 754 | ||
755 | #define AVIVO_DISP_INTERRUPT_STATUS 0x7edc | ||
756 | # define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) | ||
757 | # define AVIVO_D2_VBLANK_INTERRUPT (1 << 5) | ||
758 | |||
751 | #endif | 759 | #endif |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 09fb0b6ec7dd..ebd6b0f7bdff 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -177,7 +177,6 @@ void r520_gpu_init(struct radeon_device *rdev) | |||
177 | */ | 177 | */ |
178 | /* workaround for RV530 */ | 178 | /* workaround for RV530 */ |
179 | if (rdev->family == CHIP_RV530) { | 179 | if (rdev->family == CHIP_RV530) { |
180 | WREG32(0x4124, 1); | ||
181 | WREG32(0x4128, 0xFF); | 180 | WREG32(0x4128, 0xFF); |
182 | } | 181 | } |
183 | r420_pipes_init(rdev); | 182 | r420_pipes_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b1d945b8ed6c..b519fb2fecbb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -242,6 +242,7 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
242 | uint64_t *gpu_addr); | 242 | uint64_t *gpu_addr); |
243 | void radeon_object_unpin(struct radeon_object *robj); | 243 | void radeon_object_unpin(struct radeon_object *robj); |
244 | int radeon_object_wait(struct radeon_object *robj); | 244 | int radeon_object_wait(struct radeon_object *robj); |
245 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement); | ||
245 | int radeon_object_evict_vram(struct radeon_device *rdev); | 246 | int radeon_object_evict_vram(struct radeon_device *rdev); |
246 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); | 247 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); |
247 | void radeon_object_force_delete(struct radeon_device *rdev); | 248 | void radeon_object_force_delete(struct radeon_device *rdev); |
@@ -574,6 +575,7 @@ struct radeon_asic { | |||
574 | void (*ring_start)(struct radeon_device *rdev); | 575 | void (*ring_start)(struct radeon_device *rdev); |
575 | int (*irq_set)(struct radeon_device *rdev); | 576 | int (*irq_set)(struct radeon_device *rdev); |
576 | int (*irq_process)(struct radeon_device *rdev); | 577 | int (*irq_process)(struct radeon_device *rdev); |
578 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | ||
577 | void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); | 579 | void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); |
578 | int (*cs_parse)(struct radeon_cs_parser *p); | 580 | int (*cs_parse)(struct radeon_cs_parser *p); |
579 | int (*copy_blit)(struct radeon_device *rdev, | 581 | int (*copy_blit)(struct radeon_device *rdev, |
@@ -653,6 +655,7 @@ struct radeon_device { | |||
653 | int usec_timeout; | 655 | int usec_timeout; |
654 | enum radeon_pll_errata pll_errata; | 656 | enum radeon_pll_errata pll_errata; |
655 | int num_gb_pipes; | 657 | int num_gb_pipes; |
658 | int num_z_pipes; | ||
656 | int disp_priority; | 659 | int disp_priority; |
657 | /* BIOS */ | 660 | /* BIOS */ |
658 | uint8_t *bios; | 661 | uint8_t *bios; |
@@ -666,14 +669,11 @@ struct radeon_device { | |||
666 | resource_size_t rmmio_base; | 669 | resource_size_t rmmio_base; |
667 | resource_size_t rmmio_size; | 670 | resource_size_t rmmio_size; |
668 | void *rmmio; | 671 | void *rmmio; |
669 | radeon_rreg_t mm_rreg; | ||
670 | radeon_wreg_t mm_wreg; | ||
671 | radeon_rreg_t mc_rreg; | 672 | radeon_rreg_t mc_rreg; |
672 | radeon_wreg_t mc_wreg; | 673 | radeon_wreg_t mc_wreg; |
673 | radeon_rreg_t pll_rreg; | 674 | radeon_rreg_t pll_rreg; |
674 | radeon_wreg_t pll_wreg; | 675 | radeon_wreg_t pll_wreg; |
675 | radeon_rreg_t pcie_rreg; | 676 | uint32_t pcie_reg_mask; |
676 | radeon_wreg_t pcie_wreg; | ||
677 | radeon_rreg_t pciep_rreg; | 677 | radeon_rreg_t pciep_rreg; |
678 | radeon_wreg_t pciep_wreg; | 678 | radeon_wreg_t pciep_wreg; |
679 | struct radeon_clock clock; | 679 | struct radeon_clock clock; |
@@ -705,22 +705,42 @@ int radeon_device_init(struct radeon_device *rdev, | |||
705 | void radeon_device_fini(struct radeon_device *rdev); | 705 | void radeon_device_fini(struct radeon_device *rdev); |
706 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); | 706 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
707 | 707 | ||
708 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | ||
709 | { | ||
710 | if (reg < 0x10000) | ||
711 | return readl(((void __iomem *)rdev->rmmio) + reg); | ||
712 | else { | ||
713 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | ||
714 | return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | ||
715 | } | ||
716 | } | ||
717 | |||
718 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
719 | { | ||
720 | if (reg < 0x10000) | ||
721 | writel(v, ((void __iomem *)rdev->rmmio) + reg); | ||
722 | else { | ||
723 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | ||
724 | writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | ||
725 | } | ||
726 | } | ||
727 | |||
708 | 728 | ||
709 | /* | 729 | /* |
710 | * Registers read & write functions. | 730 | * Registers read & write functions. |
711 | */ | 731 | */ |
712 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) | 732 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) |
713 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) | 733 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) |
714 | #define RREG32(reg) rdev->mm_rreg(rdev, (reg)) | 734 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
715 | #define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v)) | 735 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
716 | #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) | 736 | #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
717 | #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) | 737 | #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
718 | #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) | 738 | #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) |
719 | #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) | 739 | #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) |
720 | #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) | 740 | #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) |
721 | #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) | 741 | #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) |
722 | #define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg)) | 742 | #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) |
723 | #define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v)) | 743 | #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) |
724 | #define WREG32_P(reg, val, mask) \ | 744 | #define WREG32_P(reg, val, mask) \ |
725 | do { \ | 745 | do { \ |
726 | uint32_t tmp_ = RREG32(reg); \ | 746 | uint32_t tmp_ = RREG32(reg); \ |
@@ -736,6 +756,24 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev); | |||
736 | WREG32_PLL(reg, tmp_); \ | 756 | WREG32_PLL(reg, tmp_); \ |
737 | } while (0) | 757 | } while (0) |
738 | 758 | ||
759 | /* | ||
760 | * Indirect registers accessor | ||
761 | */ | ||
762 | static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | ||
763 | { | ||
764 | uint32_t r; | ||
765 | |||
766 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); | ||
767 | r = RREG32(RADEON_PCIE_DATA); | ||
768 | return r; | ||
769 | } | ||
770 | |||
771 | static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
772 | { | ||
773 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); | ||
774 | WREG32(RADEON_PCIE_DATA, (v)); | ||
775 | } | ||
776 | |||
739 | void r100_pll_errata_after_index(struct radeon_device *rdev); | 777 | void r100_pll_errata_after_index(struct radeon_device *rdev); |
740 | 778 | ||
741 | 779 | ||
@@ -862,6 +900,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
862 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) | 900 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) |
863 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) | 901 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) |
864 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) | 902 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) |
903 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) | ||
865 | #define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) | 904 | #define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) |
866 | #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) | 905 | #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) |
867 | #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) | 906 | #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) |
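The radeon.h hunks above replace the pcie_rreg/pcie_wreg function pointers with a single pair of static inlines plus a per-family pcie_reg_mask (set elsewhere in this series to 0xff for pre-RV515 and 0x7ff for RV515 and newer). The access itself is the usual index/data indirection: write the masked register number to an index port, then read or write the data port. A standalone sketch of that pattern with the MMIO ports emulated by a small array; the mask values mirror the ones in the diff, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>

/* Emulated index/data pair: writing the index selects which
 * "indirect" register the data port accesses. */
static uint32_t indirect_regs[0x800];
static uint32_t index_reg;

static void wreg_index(uint32_t reg, uint32_t mask) { index_reg = reg & mask; }
static void wreg_data(uint32_t v)                   { indirect_regs[index_reg] = v; }
static uint32_t rreg_data(void)                     { return indirect_regs[index_reg]; }

static void pcie_wreg(uint32_t reg, uint32_t v, uint32_t mask)
{
	wreg_index(reg, mask);   /* WREG32(RADEON_PCIE_INDEX, reg & rdev->pcie_reg_mask) */
	wreg_data(v);            /* WREG32(RADEON_PCIE_DATA, v) */
}

static uint32_t pcie_rreg(uint32_t reg, uint32_t mask)
{
	wreg_index(reg, mask);
	return rreg_data();
}

int main(void)
{
	uint32_t mask_old = 0xff, mask_new = 0x7ff;   /* pre-RV515 vs RV515+ */

	pcie_wreg(0x123, 0xdeadbeef, mask_new);
	printf("RV515+ read back: 0x%08x\n", pcie_rreg(0x123, mask_new));

	/* With the narrower mask, register 0x123 would alias down to 0x23. */
	printf("pre-RV515 index for 0x123: 0x%02x\n", 0x123 & mask_old);
	return 0;
}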
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 9a75876e0c3b..93d8f8889302 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -49,6 +49,7 @@ void r100_vram_info(struct radeon_device *rdev); | |||
49 | int r100_gpu_reset(struct radeon_device *rdev); | 49 | int r100_gpu_reset(struct radeon_device *rdev); |
50 | int r100_mc_init(struct radeon_device *rdev); | 50 | int r100_mc_init(struct radeon_device *rdev); |
51 | void r100_mc_fini(struct radeon_device *rdev); | 51 | void r100_mc_fini(struct radeon_device *rdev); |
52 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | ||
52 | int r100_wb_init(struct radeon_device *rdev); | 53 | int r100_wb_init(struct radeon_device *rdev); |
53 | void r100_wb_fini(struct radeon_device *rdev); | 54 | void r100_wb_fini(struct radeon_device *rdev); |
54 | int r100_gart_enable(struct radeon_device *rdev); | 55 | int r100_gart_enable(struct radeon_device *rdev); |
@@ -96,6 +97,7 @@ static struct radeon_asic r100_asic = { | |||
96 | .ring_start = &r100_ring_start, | 97 | .ring_start = &r100_ring_start, |
97 | .irq_set = &r100_irq_set, | 98 | .irq_set = &r100_irq_set, |
98 | .irq_process = &r100_irq_process, | 99 | .irq_process = &r100_irq_process, |
100 | .get_vblank_counter = &r100_get_vblank_counter, | ||
99 | .fence_ring_emit = &r100_fence_ring_emit, | 101 | .fence_ring_emit = &r100_fence_ring_emit, |
100 | .cs_parse = &r100_cs_parse, | 102 | .cs_parse = &r100_cs_parse, |
101 | .copy_blit = &r100_copy_blit, | 103 | .copy_blit = &r100_copy_blit, |
@@ -156,6 +158,7 @@ static struct radeon_asic r300_asic = { | |||
156 | .ring_start = &r300_ring_start, | 158 | .ring_start = &r300_ring_start, |
157 | .irq_set = &r100_irq_set, | 159 | .irq_set = &r100_irq_set, |
158 | .irq_process = &r100_irq_process, | 160 | .irq_process = &r100_irq_process, |
161 | .get_vblank_counter = &r100_get_vblank_counter, | ||
159 | .fence_ring_emit = &r300_fence_ring_emit, | 162 | .fence_ring_emit = &r300_fence_ring_emit, |
160 | .cs_parse = &r300_cs_parse, | 163 | .cs_parse = &r300_cs_parse, |
161 | .copy_blit = &r100_copy_blit, | 164 | .copy_blit = &r100_copy_blit, |
@@ -196,6 +199,7 @@ static struct radeon_asic r420_asic = { | |||
196 | .ring_start = &r300_ring_start, | 199 | .ring_start = &r300_ring_start, |
197 | .irq_set = &r100_irq_set, | 200 | .irq_set = &r100_irq_set, |
198 | .irq_process = &r100_irq_process, | 201 | .irq_process = &r100_irq_process, |
202 | .get_vblank_counter = &r100_get_vblank_counter, | ||
199 | .fence_ring_emit = &r300_fence_ring_emit, | 203 | .fence_ring_emit = &r300_fence_ring_emit, |
200 | .cs_parse = &r300_cs_parse, | 204 | .cs_parse = &r300_cs_parse, |
201 | .copy_blit = &r100_copy_blit, | 205 | .copy_blit = &r100_copy_blit, |
@@ -243,6 +247,7 @@ static struct radeon_asic rs400_asic = { | |||
243 | .ring_start = &r300_ring_start, | 247 | .ring_start = &r300_ring_start, |
244 | .irq_set = &r100_irq_set, | 248 | .irq_set = &r100_irq_set, |
245 | .irq_process = &r100_irq_process, | 249 | .irq_process = &r100_irq_process, |
250 | .get_vblank_counter = &r100_get_vblank_counter, | ||
246 | .fence_ring_emit = &r300_fence_ring_emit, | 251 | .fence_ring_emit = &r300_fence_ring_emit, |
247 | .cs_parse = &r300_cs_parse, | 252 | .cs_parse = &r300_cs_parse, |
248 | .copy_blit = &r100_copy_blit, | 253 | .copy_blit = &r100_copy_blit, |
@@ -261,11 +266,14 @@ static struct radeon_asic rs400_asic = { | |||
261 | /* | 266 | /* |
262 | * rs600. | 267 | * rs600. |
263 | */ | 268 | */ |
269 | int rs600_init(struct radeon_device *dev); | ||
264 | void rs600_errata(struct radeon_device *rdev); | 270 | void rs600_errata(struct radeon_device *rdev); |
265 | void rs600_vram_info(struct radeon_device *rdev); | 271 | void rs600_vram_info(struct radeon_device *rdev); |
266 | int rs600_mc_init(struct radeon_device *rdev); | 272 | int rs600_mc_init(struct radeon_device *rdev); |
267 | void rs600_mc_fini(struct radeon_device *rdev); | 273 | void rs600_mc_fini(struct radeon_device *rdev); |
268 | int rs600_irq_set(struct radeon_device *rdev); | 274 | int rs600_irq_set(struct radeon_device *rdev); |
275 | int rs600_irq_process(struct radeon_device *rdev); | ||
276 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | ||
269 | int rs600_gart_enable(struct radeon_device *rdev); | 277 | int rs600_gart_enable(struct radeon_device *rdev); |
270 | void rs600_gart_disable(struct radeon_device *rdev); | 278 | void rs600_gart_disable(struct radeon_device *rdev); |
271 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 279 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
@@ -274,7 +282,7 @@ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | |||
274 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 282 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
275 | void rs600_bandwidth_update(struct radeon_device *rdev); | 283 | void rs600_bandwidth_update(struct radeon_device *rdev); |
276 | static struct radeon_asic rs600_asic = { | 284 | static struct radeon_asic rs600_asic = { |
277 | .init = &r300_init, | 285 | .init = &rs600_init, |
278 | .errata = &rs600_errata, | 286 | .errata = &rs600_errata, |
279 | .vram_info = &rs600_vram_info, | 287 | .vram_info = &rs600_vram_info, |
280 | .gpu_reset = &r300_gpu_reset, | 288 | .gpu_reset = &r300_gpu_reset, |
@@ -291,7 +299,8 @@ static struct radeon_asic rs600_asic = { | |||
291 | .cp_disable = &r100_cp_disable, | 299 | .cp_disable = &r100_cp_disable, |
292 | .ring_start = &r300_ring_start, | 300 | .ring_start = &r300_ring_start, |
293 | .irq_set = &rs600_irq_set, | 301 | .irq_set = &rs600_irq_set, |
294 | .irq_process = &r100_irq_process, | 302 | .irq_process = &rs600_irq_process, |
303 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
295 | .fence_ring_emit = &r300_fence_ring_emit, | 304 | .fence_ring_emit = &r300_fence_ring_emit, |
296 | .cs_parse = &r300_cs_parse, | 305 | .cs_parse = &r300_cs_parse, |
297 | .copy_blit = &r100_copy_blit, | 306 | .copy_blit = &r100_copy_blit, |
@@ -316,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | |||
316 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 325 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
317 | void rs690_bandwidth_update(struct radeon_device *rdev); | 326 | void rs690_bandwidth_update(struct radeon_device *rdev); |
318 | static struct radeon_asic rs690_asic = { | 327 | static struct radeon_asic rs690_asic = { |
319 | .init = &r300_init, | 328 | .init = &rs600_init, |
320 | .errata = &rs690_errata, | 329 | .errata = &rs690_errata, |
321 | .vram_info = &rs690_vram_info, | 330 | .vram_info = &rs690_vram_info, |
322 | .gpu_reset = &r300_gpu_reset, | 331 | .gpu_reset = &r300_gpu_reset, |
@@ -333,7 +342,8 @@ static struct radeon_asic rs690_asic = { | |||
333 | .cp_disable = &r100_cp_disable, | 342 | .cp_disable = &r100_cp_disable, |
334 | .ring_start = &r300_ring_start, | 343 | .ring_start = &r300_ring_start, |
335 | .irq_set = &rs600_irq_set, | 344 | .irq_set = &rs600_irq_set, |
336 | .irq_process = &r100_irq_process, | 345 | .irq_process = &rs600_irq_process, |
346 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
337 | .fence_ring_emit = &r300_fence_ring_emit, | 347 | .fence_ring_emit = &r300_fence_ring_emit, |
338 | .cs_parse = &r300_cs_parse, | 348 | .cs_parse = &r300_cs_parse, |
339 | .copy_blit = &r100_copy_blit, | 349 | .copy_blit = &r100_copy_blit, |
@@ -381,8 +391,9 @@ static struct radeon_asic rv515_asic = { | |||
381 | .cp_fini = &r100_cp_fini, | 391 | .cp_fini = &r100_cp_fini, |
382 | .cp_disable = &r100_cp_disable, | 392 | .cp_disable = &r100_cp_disable, |
383 | .ring_start = &rv515_ring_start, | 393 | .ring_start = &rv515_ring_start, |
384 | .irq_set = &r100_irq_set, | 394 | .irq_set = &rs600_irq_set, |
385 | .irq_process = &r100_irq_process, | 395 | .irq_process = &rs600_irq_process, |
396 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
386 | .fence_ring_emit = &r300_fence_ring_emit, | 397 | .fence_ring_emit = &r300_fence_ring_emit, |
387 | .cs_parse = &r300_cs_parse, | 398 | .cs_parse = &r300_cs_parse, |
388 | .copy_blit = &r100_copy_blit, | 399 | .copy_blit = &r100_copy_blit, |
@@ -423,8 +434,9 @@ static struct radeon_asic r520_asic = { | |||
423 | .cp_fini = &r100_cp_fini, | 434 | .cp_fini = &r100_cp_fini, |
424 | .cp_disable = &r100_cp_disable, | 435 | .cp_disable = &r100_cp_disable, |
425 | .ring_start = &rv515_ring_start, | 436 | .ring_start = &rv515_ring_start, |
426 | .irq_set = &r100_irq_set, | 437 | .irq_set = &rs600_irq_set, |
427 | .irq_process = &r100_irq_process, | 438 | .irq_process = &rs600_irq_process, |
439 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
428 | .fence_ring_emit = &r300_fence_ring_emit, | 440 | .fence_ring_emit = &r300_fence_ring_emit, |
429 | .cs_parse = &r300_cs_parse, | 441 | .cs_parse = &r300_cs_parse, |
430 | .copy_blit = &r100_copy_blit, | 442 | .copy_blit = &r100_copy_blit, |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index afc4db280b94..2a027e00762a 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -685,23 +685,15 @@ static const uint32_t default_tvdac_adj[CHIP_LAST] = { | |||
685 | 0x00780000, /* rs480 */ | 685 | 0x00780000, /* rs480 */ |
686 | }; | 686 | }; |
687 | 687 | ||
688 | static struct radeon_encoder_tv_dac | 688 | static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev, |
689 | *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev) | 689 | struct radeon_encoder_tv_dac *tv_dac) |
690 | { | 690 | { |
691 | struct radeon_encoder_tv_dac *tv_dac = NULL; | ||
692 | |||
693 | tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); | ||
694 | |||
695 | if (!tv_dac) | ||
696 | return NULL; | ||
697 | |||
698 | tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family]; | 691 | tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family]; |
699 | if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250)) | 692 | if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250)) |
700 | tv_dac->ps2_tvdac_adj = 0x00880000; | 693 | tv_dac->ps2_tvdac_adj = 0x00880000; |
701 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 694 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
702 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 695 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
703 | 696 | return; | |
704 | return tv_dac; | ||
705 | } | 697 | } |
706 | 698 | ||
707 | struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | 699 | struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct |
@@ -713,19 +705,18 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
713 | uint16_t dac_info; | 705 | uint16_t dac_info; |
714 | uint8_t rev, bg, dac; | 706 | uint8_t rev, bg, dac; |
715 | struct radeon_encoder_tv_dac *tv_dac = NULL; | 707 | struct radeon_encoder_tv_dac *tv_dac = NULL; |
708 | int found = 0; | ||
709 | |||
710 | tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); | ||
711 | if (!tv_dac) | ||
712 | return NULL; | ||
716 | 713 | ||
717 | if (rdev->bios == NULL) | 714 | if (rdev->bios == NULL) |
718 | return radeon_legacy_get_tv_dac_info_from_table(rdev); | 715 | goto out; |
719 | 716 | ||
720 | /* first check TV table */ | 717 | /* first check TV table */ |
721 | dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); | 718 | dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); |
722 | if (dac_info) { | 719 | if (dac_info) { |
723 | tv_dac = | ||
724 | kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); | ||
725 | |||
726 | if (!tv_dac) | ||
727 | return NULL; | ||
728 | |||
729 | rev = RBIOS8(dac_info + 0x3); | 720 | rev = RBIOS8(dac_info + 0x3); |
730 | if (rev > 4) { | 721 | if (rev > 4) { |
731 | bg = RBIOS8(dac_info + 0xc) & 0xf; | 722 | bg = RBIOS8(dac_info + 0xc) & 0xf; |
@@ -739,6 +730,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
739 | bg = RBIOS8(dac_info + 0x10) & 0xf; | 730 | bg = RBIOS8(dac_info + 0x10) & 0xf; |
740 | dac = RBIOS8(dac_info + 0x11) & 0xf; | 731 | dac = RBIOS8(dac_info + 0x11) & 0xf; |
741 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 732 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
733 | found = 1; | ||
742 | } else if (rev > 1) { | 734 | } else if (rev > 1) { |
743 | bg = RBIOS8(dac_info + 0xc) & 0xf; | 735 | bg = RBIOS8(dac_info + 0xc) & 0xf; |
744 | dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; | 736 | dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; |
@@ -751,22 +743,15 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
751 | bg = RBIOS8(dac_info + 0xe) & 0xf; | 743 | bg = RBIOS8(dac_info + 0xe) & 0xf; |
752 | dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; | 744 | dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; |
753 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 745 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
746 | found = 1; | ||
754 | } | 747 | } |
755 | |||
756 | tv_dac->tv_std = radeon_combios_get_tv_info(encoder); | 748 | tv_dac->tv_std = radeon_combios_get_tv_info(encoder); |
757 | 749 | } | |
758 | } else { | 750 | if (!found) { |
759 | /* then check CRT table */ | 751 | /* then check CRT table */ |
760 | dac_info = | 752 | dac_info = |
761 | combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); | 753 | combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); |
762 | if (dac_info) { | 754 | if (dac_info) { |
763 | tv_dac = | ||
764 | kzalloc(sizeof(struct radeon_encoder_tv_dac), | ||
765 | GFP_KERNEL); | ||
766 | |||
767 | if (!tv_dac) | ||
768 | return NULL; | ||
769 | |||
770 | rev = RBIOS8(dac_info) & 0x3; | 755 | rev = RBIOS8(dac_info) & 0x3; |
771 | if (rev < 2) { | 756 | if (rev < 2) { |
772 | bg = RBIOS8(dac_info + 0x3) & 0xf; | 757 | bg = RBIOS8(dac_info + 0x3) & 0xf; |
@@ -775,6 +760,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
775 | (bg << 16) | (dac << 20); | 760 | (bg << 16) | (dac << 20); |
776 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 761 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
777 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 762 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
763 | found = 1; | ||
778 | } else { | 764 | } else { |
779 | bg = RBIOS8(dac_info + 0x4) & 0xf; | 765 | bg = RBIOS8(dac_info + 0x4) & 0xf; |
780 | dac = RBIOS8(dac_info + 0x5) & 0xf; | 766 | dac = RBIOS8(dac_info + 0x5) & 0xf; |
@@ -782,13 +768,17 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
782 | (bg << 16) | (dac << 20); | 768 | (bg << 16) | (dac << 20); |
783 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 769 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
784 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 770 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
771 | found = 1; | ||
785 | } | 772 | } |
786 | } else { | 773 | } else { |
787 | DRM_INFO("No TV DAC info found in BIOS\n"); | 774 | DRM_INFO("No TV DAC info found in BIOS\n"); |
788 | return radeon_legacy_get_tv_dac_info_from_table(rdev); | ||
789 | } | 775 | } |
790 | } | 776 | } |
791 | 777 | ||
778 | out: | ||
779 | if (!found) /* fallback to defaults */ | ||
780 | radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); | ||
781 | |||
792 | return tv_dac; | 782 | return tv_dac; |
793 | } | 783 | } |
794 | 784 | ||
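The radeon_combios.c change reshapes the TV-DAC lookup: the struct is allocated once up front, `found` is set whenever the BIOS TV table or CRT table actually yields adjustment values, and only at the end does the code fall back to the per-family defaults. A compact sketch of that allocate-once / try-tables / fall-back shape; the table probes here are stand-ins, not the real COMBIOS parsing.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct tv_dac { uint32_t ps2_adj, pal_adj, ntsc_adj; };

/* Stand-ins for the BIOS table probes; return 1 when the table supplied values. */
static int try_tv_table(struct tv_dac *d)  { (void)d; return 0; }
static int try_crt_table(struct tv_dac *d)
{
	d->ps2_adj = 0x00390000;                 /* illustrative value only */
	d->pal_adj = d->ntsc_adj = d->ps2_adj;
	return 1;
}

static void defaults_from_family(struct tv_dac *d)
{
	d->ps2_adj = 0x00780000;                 /* per-family default */
	d->pal_adj = d->ntsc_adj = d->ps2_adj;
}

static struct tv_dac *get_tv_dac_info(int have_bios)
{
	struct tv_dac *d = calloc(1, sizeof(*d));   /* allocate exactly once */
	int found = 0;

	if (!d)
		return NULL;
	if (have_bios) {
		found = try_tv_table(d);
		if (!found)
			found = try_crt_table(d);
	}
	if (!found)                                 /* no BIOS, or neither table helped */
		defaults_from_family(d);
	return d;
}

int main(void)
{
	struct tv_dac *d = get_tv_dac_info(1);

	if (d) {
		printf("ps2_tvdac_adj = 0x%08x\n", d->ps2_adj);
		free(d);
	}
	return 0;
}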
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index d8356827ef17..7a52c461145c 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -406,6 +406,15 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) | |||
406 | { | 406 | { |
407 | uint32_t gb_tile_config, gb_pipe_sel = 0; | 407 | uint32_t gb_tile_config, gb_pipe_sel = 0; |
408 | 408 | ||
409 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { | ||
410 | uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2); | ||
411 | if ((z_pipe_sel & 3) == 3) | ||
412 | dev_priv->num_z_pipes = 2; | ||
413 | else | ||
414 | dev_priv->num_z_pipes = 1; | ||
415 | } else | ||
416 | dev_priv->num_z_pipes = 1; | ||
417 | |||
409 | /* RS4xx/RS6xx/R4xx/R5xx */ | 418 | /* RS4xx/RS6xx/R4xx/R5xx */ |
410 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { | 419 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { |
411 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); | 420 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 9ff6dcb97f9d..7693f7c67bd3 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -225,25 +225,18 @@ void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
225 | 225 | ||
226 | void radeon_register_accessor_init(struct radeon_device *rdev) | 226 | void radeon_register_accessor_init(struct radeon_device *rdev) |
227 | { | 227 | { |
228 | rdev->mm_rreg = &r100_mm_rreg; | ||
229 | rdev->mm_wreg = &r100_mm_wreg; | ||
230 | rdev->mc_rreg = &radeon_invalid_rreg; | 228 | rdev->mc_rreg = &radeon_invalid_rreg; |
231 | rdev->mc_wreg = &radeon_invalid_wreg; | 229 | rdev->mc_wreg = &radeon_invalid_wreg; |
232 | rdev->pll_rreg = &radeon_invalid_rreg; | 230 | rdev->pll_rreg = &radeon_invalid_rreg; |
233 | rdev->pll_wreg = &radeon_invalid_wreg; | 231 | rdev->pll_wreg = &radeon_invalid_wreg; |
234 | rdev->pcie_rreg = &radeon_invalid_rreg; | ||
235 | rdev->pcie_wreg = &radeon_invalid_wreg; | ||
236 | rdev->pciep_rreg = &radeon_invalid_rreg; | 232 | rdev->pciep_rreg = &radeon_invalid_rreg; |
237 | rdev->pciep_wreg = &radeon_invalid_wreg; | 233 | rdev->pciep_wreg = &radeon_invalid_wreg; |
238 | 234 | ||
239 | /* Don't change order as we are overriding accessors. */ | 235 | /* Don't change order as we are overriding accessors. */ |
240 | if (rdev->family < CHIP_RV515) { | 236 | if (rdev->family < CHIP_RV515) { |
241 | rdev->pcie_rreg = &rv370_pcie_rreg; | 237 | rdev->pcie_reg_mask = 0xff; |
242 | rdev->pcie_wreg = &rv370_pcie_wreg; | 238 | } else { |
243 | } | 239 | rdev->pcie_reg_mask = 0x7ff; |
244 | if (rdev->family >= CHIP_RV515) { | ||
245 | rdev->pcie_rreg = &rv515_pcie_rreg; | ||
246 | rdev->pcie_wreg = &rv515_pcie_wreg; | ||
247 | } | 240 | } |
248 | /* FIXME: not sure here */ | 241 | /* FIXME: not sure here */ |
249 | if (rdev->family <= CHIP_R580) { | 242 | if (rdev->family <= CHIP_R580) { |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 3933f8216a34..6fa32dac4e97 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -100,9 +100,10 @@ | |||
100 | * 1.28- Add support for VBL on CRTC2 | 100 | * 1.28- Add support for VBL on CRTC2 |
101 | * 1.29- R500 3D cmd buffer support | 101 | * 1.29- R500 3D cmd buffer support |
102 | * 1.30- Add support for occlusion queries | 102 | * 1.30- Add support for occlusion queries |
103 | * 1.31- Add support for num Z pipes from GET_PARAM | ||
103 | */ | 104 | */ |
104 | #define DRIVER_MAJOR 1 | 105 | #define DRIVER_MAJOR 1 |
105 | #define DRIVER_MINOR 30 | 106 | #define DRIVER_MINOR 31 |
106 | #define DRIVER_PATCHLEVEL 0 | 107 | #define DRIVER_PATCHLEVEL 0 |
107 | 108 | ||
108 | /* | 109 | /* |
@@ -329,6 +330,7 @@ typedef struct drm_radeon_private { | |||
329 | resource_size_t fb_aper_offset; | 330 | resource_size_t fb_aper_offset; |
330 | 331 | ||
331 | int num_gb_pipes; | 332 | int num_gb_pipes; |
333 | int num_z_pipes; | ||
332 | int track_flush; | 334 | int track_flush; |
333 | drm_local_map_t *mmio; | 335 | drm_local_map_t *mmio; |
334 | 336 | ||
@@ -689,6 +691,7 @@ extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pciga | |||
689 | 691 | ||
690 | /* pipe config regs */ | 692 | /* pipe config regs */ |
691 | #define R400_GB_PIPE_SELECT 0x402c | 693 | #define R400_GB_PIPE_SELECT 0x402c |
694 | #define RV530_GB_PIPE_SELECT2 0x4124 | ||
692 | #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ | 695 | #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ |
693 | #define R300_GB_TILE_CONFIG 0x4018 | 696 | #define R300_GB_TILE_CONFIG 0x4018 |
694 | # define R300_ENABLE_TILING (1 << 0) | 697 | # define R300_ENABLE_TILING (1 << 0) |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 3206c0ad7b6c..ec383edf5f38 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -574,6 +574,8 @@ int radeonfb_create(struct radeon_device *rdev, | |||
574 | goto out_unref; | 574 | goto out_unref; |
575 | } | 575 | } |
576 | 576 | ||
577 | memset_io(fbptr, 0, aligned_size); | ||
578 | |||
577 | strcpy(info->fix.id, "radeondrmfb"); | 579 | strcpy(info->fix.id, "radeondrmfb"); |
578 | info->fix.type = FB_TYPE_PACKED_PIXELS; | 580 | info->fix.type = FB_TYPE_PACKED_PIXELS; |
579 | info->fix.visual = FB_VISUAL_TRUECOLOR; | 581 | info->fix.visual = FB_VISUAL_TRUECOLOR; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index cded5180c752..d880edf254db 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -262,8 +262,34 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
262 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | 262 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
263 | struct drm_file *filp) | 263 | struct drm_file *filp) |
264 | { | 264 | { |
265 | /* FIXME: implement */ | 265 | struct drm_radeon_gem_busy *args = data; |
266 | return 0; | 266 | struct drm_gem_object *gobj; |
267 | struct radeon_object *robj; | ||
268 | int r; | ||
269 | uint32_t cur_placement; | ||
270 | |||
271 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
272 | if (gobj == NULL) { | ||
273 | return -EINVAL; | ||
274 | } | ||
275 | robj = gobj->driver_private; | ||
276 | r = radeon_object_busy_domain(robj, &cur_placement); | ||
277 | switch (cur_placement) { | ||
278 | case TTM_PL_VRAM: | ||
279 | args->domain = RADEON_GEM_DOMAIN_VRAM; | ||
280 | break; | ||
281 | case TTM_PL_TT: | ||
282 | args->domain = RADEON_GEM_DOMAIN_GTT; | ||
283 | break; | ||
284 | case TTM_PL_SYSTEM: | ||
285 | args->domain = RADEON_GEM_DOMAIN_CPU; | ||
286 | default: | ||
287 | break; | ||
288 | } | ||
289 | mutex_lock(&dev->struct_mutex); | ||
290 | drm_gem_object_unreference(gobj); | ||
291 | mutex_unlock(&dev->struct_mutex); | ||
292 | return r; | ||
267 | } | 293 | } |
268 | 294 | ||
269 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | 295 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
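radeon_gem_busy_ioctl() now reports where the buffer currently resides by mapping the TTM placement back to a GEM domain flag (note that in the hunk the TTM_PL_SYSTEM case falls through into the default break, which behaves the same as an explicit break). A minimal sketch of that mapping; the numeric values are illustrative, only the shape of the switch follows the change above.

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants; the real TTM_PL_* and RADEON_GEM_DOMAIN_* values
 * come from the kernel headers. */
enum placement { PL_SYSTEM = 0, PL_TT = 1, PL_VRAM = 2 };
enum domain    { DOMAIN_CPU = 0x1, DOMAIN_GTT = 0x2, DOMAIN_VRAM = 0x4 };

static uint32_t domain_from_placement(enum placement p)
{
	switch (p) {
	case PL_VRAM:   return DOMAIN_VRAM;
	case PL_TT:     return DOMAIN_GTT;
	case PL_SYSTEM: return DOMAIN_CPU;
	default:        return 0;   /* unknown placement: leave the domain unset */
	}
}

int main(void)
{
	printf("VRAM   -> 0x%x\n", domain_from_placement(PL_VRAM));
	printf("TT     -> 0x%x\n", domain_from_placement(PL_TT));
	printf("SYSTEM -> 0x%x\n", domain_from_placement(PL_SYSTEM));
	return 0;
}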
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 491d569deb0e..9805e4b6ca1b 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -32,60 +32,6 @@ | |||
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "atom.h" | 33 | #include "atom.h" |
34 | 34 | ||
35 | static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | ||
36 | { | ||
37 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | ||
38 | uint32_t irq_mask = RADEON_SW_INT_TEST; | ||
39 | |||
40 | if (irqs) { | ||
41 | WREG32(RADEON_GEN_INT_STATUS, irqs); | ||
42 | } | ||
43 | return irqs & irq_mask; | ||
44 | } | ||
45 | |||
46 | int r100_irq_set(struct radeon_device *rdev) | ||
47 | { | ||
48 | uint32_t tmp = 0; | ||
49 | |||
50 | if (rdev->irq.sw_int) { | ||
51 | tmp |= RADEON_SW_INT_ENABLE; | ||
52 | } | ||
53 | /* Todo go through CRTC and enable vblank int or not */ | ||
54 | WREG32(RADEON_GEN_INT_CNTL, tmp); | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | int r100_irq_process(struct radeon_device *rdev) | ||
59 | { | ||
60 | uint32_t status; | ||
61 | |||
62 | status = r100_irq_ack(rdev); | ||
63 | if (!status) { | ||
64 | return IRQ_NONE; | ||
65 | } | ||
66 | while (status) { | ||
67 | /* SW interrupt */ | ||
68 | if (status & RADEON_SW_INT_TEST) { | ||
69 | radeon_fence_process(rdev); | ||
70 | } | ||
71 | status = r100_irq_ack(rdev); | ||
72 | } | ||
73 | return IRQ_HANDLED; | ||
74 | } | ||
75 | |||
76 | int rs600_irq_set(struct radeon_device *rdev) | ||
77 | { | ||
78 | uint32_t tmp = 0; | ||
79 | |||
80 | if (rdev->irq.sw_int) { | ||
81 | tmp |= RADEON_SW_INT_ENABLE; | ||
82 | } | ||
83 | WREG32(RADEON_GEN_INT_CNTL, tmp); | ||
84 | /* Todo go through CRTC and enable vblank int or not */ | ||
85 | WREG32(R500_DxMODE_INT_MASK, 0); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) | 35 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) |
90 | { | 36 | { |
91 | struct drm_device *dev = (struct drm_device *) arg; | 37 | struct drm_device *dev = (struct drm_device *) arg; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3357110e30ce..dce09ada32bc 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -95,6 +95,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
95 | case RADEON_INFO_NUM_GB_PIPES: | 95 | case RADEON_INFO_NUM_GB_PIPES: |
96 | value = rdev->num_gb_pipes; | 96 | value = rdev->num_gb_pipes; |
97 | break; | 97 | break; |
98 | case RADEON_INFO_NUM_Z_PIPES: | ||
99 | value = rdev->num_z_pipes; | ||
100 | break; | ||
98 | default: | 101 | default: |
99 | DRM_DEBUG("Invalid request %d\n", info->request); | 102 | DRM_DEBUG("Invalid request %d\n", info->request); |
100 | return -EINVAL; | 103 | return -EINVAL; |
@@ -141,19 +144,42 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
141 | */ | 144 | */ |
142 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | 145 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) |
143 | { | 146 | { |
144 | /* FIXME: implement */ | 147 | struct radeon_device *rdev = dev->dev_private; |
145 | return 0; | 148 | |
149 | if (crtc < 0 || crtc > 1) { | ||
150 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
151 | return -EINVAL; | ||
152 | } | ||
153 | |||
154 | return radeon_get_vblank_counter(rdev, crtc); | ||
146 | } | 155 | } |
147 | 156 | ||
148 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) | 157 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) |
149 | { | 158 | { |
150 | /* FIXME: implement */ | 159 | struct radeon_device *rdev = dev->dev_private; |
151 | return 0; | 160 | |
161 | if (crtc < 0 || crtc > 1) { | ||
162 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
163 | return -EINVAL; | ||
164 | } | ||
165 | |||
166 | rdev->irq.crtc_vblank_int[crtc] = true; | ||
167 | |||
168 | return radeon_irq_set(rdev); | ||
152 | } | 169 | } |
153 | 170 | ||
154 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) | 171 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) |
155 | { | 172 | { |
156 | /* FIXME: implement */ | 173 | struct radeon_device *rdev = dev->dev_private; |
174 | |||
175 | if (crtc < 0 || crtc > 1) { | ||
176 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
177 | return; | ||
178 | } | ||
179 | |||
180 | rdev->irq.crtc_vblank_int[crtc] = false; | ||
181 | |||
182 | radeon_irq_set(rdev); | ||
157 | } | 183 | } |
158 | 184 | ||
159 | 185 | ||
@@ -295,5 +321,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { | |||
295 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), | 321 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), |
296 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), | 322 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), |
297 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), | 323 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), |
324 | DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH), | ||
298 | }; | 325 | }; |
299 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 326 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
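The KMS vblank hooks above validate the CRTC index, flip the per-CRTC flag in rdev->irq, and reprogram the hardware enables through radeon_irq_set(). A short user-space sketch of that enable/disable flow with the per-ASIC call stubbed out; names and the error value are placeholders for the kernel's.

#include <stdio.h>
#include <stdbool.h>

#define NUM_CRTCS 2

struct irq_state { bool sw_int; bool crtc_vblank_int[NUM_CRTCS]; };

/* Stand-in for the per-ASIC radeon_irq_set(): reprogram enables from the flags. */
static int irq_set(const struct irq_state *irq)
{
	printf("enable: sw=%d vbl0=%d vbl1=%d\n",
	       irq->sw_int, irq->crtc_vblank_int[0], irq->crtc_vblank_int[1]);
	return 0;
}

static int enable_vblank(struct irq_state *irq, int crtc)
{
	if (crtc < 0 || crtc >= NUM_CRTCS)
		return -22;                        /* -EINVAL */
	irq->crtc_vblank_int[crtc] = true;
	return irq_set(irq);
}

static void disable_vblank(struct irq_state *irq, int crtc)
{
	if (crtc < 0 || crtc >= NUM_CRTCS)
		return;
	irq->crtc_vblank_int[crtc] = false;
	irq_set(irq);
}

int main(void)
{
	struct irq_state irq = { .sw_int = true };

	enable_vblank(&irq, 1);
	disable_vblank(&irq, 1);
	return 0;
}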
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 7d06dc98a42a..0da72f18fd3a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -310,10 +310,13 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
310 | RADEON_CRTC_DISP_REQ_EN_B)); | 310 | RADEON_CRTC_DISP_REQ_EN_B)); |
311 | WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); | 311 | WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); |
312 | } | 312 | } |
313 | drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); | ||
314 | radeon_crtc_load_lut(crtc); | ||
313 | break; | 315 | break; |
314 | case DRM_MODE_DPMS_STANDBY: | 316 | case DRM_MODE_DPMS_STANDBY: |
315 | case DRM_MODE_DPMS_SUSPEND: | 317 | case DRM_MODE_DPMS_SUSPEND: |
316 | case DRM_MODE_DPMS_OFF: | 318 | case DRM_MODE_DPMS_OFF: |
319 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | ||
317 | if (radeon_crtc->crtc_id) | 320 | if (radeon_crtc->crtc_id) |
318 | WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); | 321 | WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); |
319 | else { | 322 | else { |
@@ -323,10 +326,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
323 | } | 326 | } |
324 | break; | 327 | break; |
325 | } | 328 | } |
326 | |||
327 | if (mode != DRM_MODE_DPMS_OFF) { | ||
328 | radeon_crtc_load_lut(crtc); | ||
329 | } | ||
330 | } | 329 | } |
331 | 330 | ||
332 | /* properly set crtc bpp when using atombios */ | 331 | /* properly set crtc bpp when using atombios */ |
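The legacy CRTC DPMS change brackets the period in which the CRTC is stopped: drm_vblank_pre_modeset() runs before the CRTC is blanked, and drm_vblank_post_modeset() plus the LUT reload run only on the power-on path, replacing the old unconditional LUT reload at the end. A sketch of that ordering only, with the DRM calls reduced to prints:

#include <stdio.h>

/* Ordering sketch only: the vblank counter bookkeeping must bracket the
 * interval during which the CRTC (and its frame counter) is stopped. */
static void vblank_pre_modeset(int crtc)  { printf("save vblank count, crtc %d\n", crtc); }
static void vblank_post_modeset(int crtc) { printf("resync vblank count, crtc %d\n", crtc); }

static void crtc_dpms(int crtc, int on)
{
	if (on) {
		printf("unblank crtc %d\n", crtc);
		vblank_post_modeset(crtc);   /* counter is ticking again */
		printf("reload LUT on crtc %d\n", crtc);
	} else {
		vblank_pre_modeset(crtc);    /* about to stop the counter */
		printf("blank crtc %d\n", crtc);
	}
}

int main(void)
{
	crtc_dpms(0, 0);
	crtc_dpms(0, 1);
	return 0;
}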
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 34d0f58eb944..9322675ef6d0 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -1066,6 +1066,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1066 | 1066 | ||
1067 | switch (radeon_encoder->encoder_id) { | 1067 | switch (radeon_encoder->encoder_id) { |
1068 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1068 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
1069 | encoder->possible_crtcs = 0x1; | ||
1069 | drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); | 1070 | drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); |
1070 | drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); | 1071 | drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); |
1071 | if (rdev->is_atom_bios) | 1072 | if (rdev->is_atom_bios) |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index e98cae3bf4a6..b85fb83d7ae8 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -316,6 +316,25 @@ int radeon_object_wait(struct radeon_object *robj) | |||
316 | return r; | 316 | return r; |
317 | } | 317 | } |
318 | 318 | ||
319 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) | ||
320 | { | ||
321 | int r = 0; | ||
322 | |||
323 | r = radeon_object_reserve(robj, true); | ||
324 | if (unlikely(r != 0)) { | ||
325 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | ||
326 | return r; | ||
327 | } | ||
328 | spin_lock(&robj->tobj.lock); | ||
329 | *cur_placement = robj->tobj.mem.mem_type; | ||
330 | if (robj->tobj.sync_obj) { | ||
331 | r = ttm_bo_wait(&robj->tobj, true, true, true); | ||
332 | } | ||
333 | spin_unlock(&robj->tobj.lock); | ||
334 | radeon_object_unreserve(robj); | ||
335 | return r; | ||
336 | } | ||
337 | |||
319 | int radeon_object_evict_vram(struct radeon_device *rdev) | 338 | int radeon_object_evict_vram(struct radeon_device *rdev) |
320 | { | 339 | { |
321 | if (rdev->flags & RADEON_IS_IGP) { | 340 | if (rdev->flags & RADEON_IS_IGP) { |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index e1b618574461..4df43f62c678 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -982,12 +982,15 @@ | |||
982 | # define RS400_TMDS2_PLLRST (1 << 1) | 982 | # define RS400_TMDS2_PLLRST (1 << 1) |
983 | 983 | ||
984 | #define RADEON_GEN_INT_CNTL 0x0040 | 984 | #define RADEON_GEN_INT_CNTL 0x0040 |
985 | # define RADEON_CRTC_VBLANK_MASK (1 << 0) | ||
986 | # define RADEON_CRTC2_VBLANK_MASK (1 << 9) | ||
985 | # define RADEON_SW_INT_ENABLE (1 << 25) | 987 | # define RADEON_SW_INT_ENABLE (1 << 25) |
986 | #define RADEON_GEN_INT_STATUS 0x0044 | 988 | #define RADEON_GEN_INT_STATUS 0x0044 |
987 | # define RADEON_VSYNC_INT_AK (1 << 2) | 989 | # define AVIVO_DISPLAY_INT_STATUS (1 << 0) |
988 | # define RADEON_VSYNC_INT (1 << 2) | 990 | # define RADEON_CRTC_VBLANK_STAT (1 << 0) |
989 | # define RADEON_VSYNC2_INT_AK (1 << 6) | 991 | # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) |
990 | # define RADEON_VSYNC2_INT (1 << 6) | 992 | # define RADEON_CRTC2_VBLANK_STAT (1 << 9) |
993 | # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) | ||
991 | # define RADEON_SW_INT_FIRE (1 << 26) | 994 | # define RADEON_SW_INT_FIRE (1 << 26) |
992 | # define RADEON_SW_INT_TEST (1 << 25) | 995 | # define RADEON_SW_INT_TEST (1 << 25) |
993 | # define RADEON_SW_INT_TEST_ACK (1 << 25) | 996 | # define RADEON_SW_INT_TEST_ACK (1 << 25) |
@@ -2334,6 +2337,9 @@ | |||
2334 | # define RADEON_RE_WIDTH_SHIFT 0 | 2337 | # define RADEON_RE_WIDTH_SHIFT 0 |
2335 | # define RADEON_RE_HEIGHT_SHIFT 16 | 2338 | # define RADEON_RE_HEIGHT_SHIFT 16 |
2336 | 2339 | ||
2340 | #define RADEON_RB3D_ZPASS_DATA 0x3290 | ||
2341 | #define RADEON_RB3D_ZPASS_ADDR 0x3294 | ||
2342 | |||
2337 | #define RADEON_SE_CNTL 0x1c4c | 2343 | #define RADEON_SE_CNTL 0x1c4c |
2338 | # define RADEON_FFACE_CULL_CW (0 << 0) | 2344 | # define RADEON_FFACE_CULL_CW (0 << 0) |
2339 | # define RADEON_FFACE_CULL_CCW (1 << 0) | 2345 | # define RADEON_FFACE_CULL_CCW (1 << 0) |
@@ -3568,4 +3574,6 @@ | |||
3568 | #define RADEON_SCRATCH_REG4 0x15f0 | 3574 | #define RADEON_SCRATCH_REG4 0x15f0 |
3569 | #define RADEON_SCRATCH_REG5 0x15f4 | 3575 | #define RADEON_SCRATCH_REG5 0x15f4 |
3570 | 3576 | ||
3577 | #define RV530_GB_PIPE_SELECT2 0x4124 | ||
3578 | |||
3571 | #endif | 3579 | #endif |
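The new RADEON_CRTC{,2}_VBLANK_MASK and *_VBLANK_STAT/_STAT_ACK bits back the CRTC vblank interrupt support added elsewhere in this series (the handler itself is presumably the r100-family irq code), replacing the old VSYNC_INT definitions. An illustrative enable/ack sequence, as a sketch only:

	/* Sketch: enable CRTC vblank interrupts and acknowledge one occurrence. */
	uint32_t tmp = RREG32(RADEON_GEN_INT_CNTL);
	tmp |= RADEON_CRTC_VBLANK_MASK | RADEON_CRTC2_VBLANK_MASK;
	WREG32(RADEON_GEN_INT_CNTL, tmp);

	uint32_t status = RREG32(RADEON_GEN_INT_STATUS);
	if (status & RADEON_CRTC_VBLANK_STAT)
		WREG32(RADEON_GEN_INT_STATUS, RADEON_CRTC_VBLANK_STAT_ACK);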
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 46645f3e0328..2882f40d5ec5 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -3081,6 +3081,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil | |||
3081 | case RADEON_PARAM_NUM_GB_PIPES: | 3081 | case RADEON_PARAM_NUM_GB_PIPES: |
3082 | value = dev_priv->num_gb_pipes; | 3082 | value = dev_priv->num_gb_pipes; |
3083 | break; | 3083 | break; |
3084 | case RADEON_PARAM_NUM_Z_PIPES: | ||
3085 | value = dev_priv->num_z_pipes; | ||
3086 | break; | ||
3084 | default: | 3087 | default: |
3085 | DRM_DEBUG("Invalid parameter %d\n", param->param); | 3088 | DRM_DEBUG("Invalid parameter %d\n", param->param); |
3086 | return -EINVAL; | 3089 | return -EINVAL; |
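RADEON_PARAM_NUM_Z_PIPES exposes the Z-pipe count (paired with the new RADEON_RB3D_ZPASS_* registers and RV530_GB_PIPE_SELECT2 above), presumably so userspace occlusion-query code knows how many Z pipes report ZPASS data. A userspace sketch using the legacy getparam ioctl; fd is an open DRM file descriptor, error handling is elided, and the snippet assumes a radeon_drm.h that defines the new parameter:

	#include <xf86drm.h>
	#include <radeon_drm.h>

	int num_z_pipes = 0;
	drm_radeon_getparam_t gp = {
		.param = RADEON_PARAM_NUM_Z_PIPES,
		.value = &num_z_pipes,
	};
	drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp));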
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index bbea6dee4a94..02fd11aad6a2 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -240,6 +240,88 @@ void rs600_mc_fini(struct radeon_device *rdev) | |||
240 | 240 | ||
241 | 241 | ||
242 | /* | 242 | /* |
243 | * Interrupts | ||
244 | */ | ||
245 | int rs600_irq_set(struct radeon_device *rdev) | ||
246 | { | ||
247 | uint32_t tmp = 0; | ||
248 | uint32_t mode_int = 0; | ||
249 | |||
250 | if (rdev->irq.sw_int) { | ||
251 | tmp |= RADEON_SW_INT_ENABLE; | ||
252 | } | ||
253 | if (rdev->irq.crtc_vblank_int[0]) { | ||
254 | tmp |= AVIVO_DISPLAY_INT_STATUS; | ||
255 | mode_int |= AVIVO_D1MODE_INT_MASK; | ||
256 | } | ||
257 | if (rdev->irq.crtc_vblank_int[1]) { | ||
258 | tmp |= AVIVO_DISPLAY_INT_STATUS; | ||
259 | mode_int |= AVIVO_D2MODE_INT_MASK; | ||
260 | } | ||
261 | WREG32(RADEON_GEN_INT_CNTL, tmp); | ||
262 | WREG32(AVIVO_DxMODE_INT_MASK, mode_int); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | ||
267 | { | ||
268 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | ||
269 | uint32_t irq_mask = RADEON_SW_INT_TEST; | ||
270 | |||
271 | if (irqs & AVIVO_DISPLAY_INT_STATUS) { | ||
272 | *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); | ||
273 | if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | ||
274 | WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | ||
275 | } | ||
276 | if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | ||
277 | WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | ||
278 | } | ||
279 | } else { | ||
280 | *r500_disp_int = 0; | ||
281 | } | ||
282 | |||
283 | if (irqs) { | ||
284 | WREG32(RADEON_GEN_INT_STATUS, irqs); | ||
285 | } | ||
286 | return irqs & irq_mask; | ||
287 | } | ||
288 | |||
289 | int rs600_irq_process(struct radeon_device *rdev) | ||
290 | { | ||
291 | uint32_t status; | ||
292 | uint32_t r500_disp_int; | ||
293 | |||
294 | status = rs600_irq_ack(rdev, &r500_disp_int); | ||
295 | if (!status && !r500_disp_int) { | ||
296 | return IRQ_NONE; | ||
297 | } | ||
298 | while (status || r500_disp_int) { | ||
299 | /* SW interrupt */ | ||
300 | if (status & RADEON_SW_INT_TEST) { | ||
301 | radeon_fence_process(rdev); | ||
302 | } | ||
303 | /* Vertical blank interrupts */ | ||
304 | if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | ||
305 | drm_handle_vblank(rdev->ddev, 0); | ||
306 | } | ||
307 | if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | ||
308 | drm_handle_vblank(rdev->ddev, 1); | ||
309 | } | ||
310 | status = rs600_irq_ack(rdev, &r500_disp_int); | ||
311 | } | ||
312 | return IRQ_HANDLED; | ||
313 | } | ||
314 | |||
315 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | ||
316 | { | ||
317 | if (crtc == 0) | ||
318 | return RREG32(AVIVO_D1CRTC_FRAME_COUNT); | ||
319 | else | ||
320 | return RREG32(AVIVO_D2CRTC_FRAME_COUNT); | ||
321 | } | ||
322 | |||
323 | |||
324 | /* | ||
243 | * Global GPU functions | 325 | * Global GPU functions |
244 | */ | 326 | */ |
245 | void rs600_disable_vga(struct radeon_device *rdev) | 327 | void rs600_disable_vga(struct radeon_device *rdev) |
@@ -327,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
327 | ((reg) & RS600_MC_ADDR_MASK)); | 409 | ((reg) & RS600_MC_ADDR_MASK)); |
328 | WREG32(RS600_MC_DATA, v); | 410 | WREG32(RS600_MC_DATA, v); |
329 | } | 411 | } |
412 | |||
413 | static const unsigned rs600_reg_safe_bm[219] = { | ||
414 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
415 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
416 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
417 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
418 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
419 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
420 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
421 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
422 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
423 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
424 | 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, | ||
425 | 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, | ||
426 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
427 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
428 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, | ||
429 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
430 | 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, | ||
431 | 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, | ||
432 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
433 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
434 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
435 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
436 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
437 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
438 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
439 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
440 | 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
441 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
442 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
443 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
444 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
445 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
446 | 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, | ||
447 | 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, | ||
448 | 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, | ||
449 | 0x00000000, 0x0000C100, 0x00000000, 0x00000000, | ||
450 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
451 | 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, | ||
452 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
453 | 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF, | ||
454 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
455 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
456 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
457 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
458 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
459 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
460 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
461 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
462 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
463 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
464 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
465 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
466 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
467 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
468 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
469 | }; | ||
470 | |||
471 | int rs600_init(struct radeon_device *rdev) | ||
472 | { | ||
473 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; | ||
474 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); | ||
475 | return 0; | ||
476 | } | ||
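rs600_reg_safe_bm[] is a bitmap over the register range visible to the command-stream checker: one bit per 32-bit register, 219 * 32 = 7008 registers covered, with a set bit marking a register that userspace command buffers may write directly. A simplified sketch of the kind of lookup the CS parser performs (the real check sits in the r300/r100 packet-parsing code, not here):

	/* Simplified sketch of a reg_safe_bm lookup; not the actual parser code. */
	static bool reg_is_safe(const unsigned *bm, unsigned bm_size, unsigned reg)
	{
		unsigned idx = (reg >> 2) / 32;	/* dword index, 32 registers per word */
		unsigned bit = (reg >> 2) & 31;

		if (idx >= bm_size)
			return false;
		return (bm[idx] >> bit) & 1;
	}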
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 839595b00728..879882533e45 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -652,3 +652,4 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
652 | WREG32(RS690_MC_DATA, v); | 652 | WREG32(RS690_MC_DATA, v); |
653 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); | 653 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); |
654 | } | 654 | } |
655 | |||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index fd8f3ca716ea..0566fb67e460 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -400,25 +400,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
400 | WREG32(MC_IND_INDEX, 0); | 400 | WREG32(MC_IND_INDEX, 0); |
401 | } | 401 | } |
402 | 402 | ||
403 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | ||
404 | { | ||
405 | uint32_t r; | ||
406 | |||
407 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); | ||
408 | (void)RREG32(PCIE_INDEX); | ||
409 | r = RREG32(PCIE_DATA); | ||
410 | return r; | ||
411 | } | ||
412 | |||
413 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
414 | { | ||
415 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); | ||
416 | (void)RREG32(PCIE_INDEX); | ||
417 | WREG32(PCIE_DATA, (v)); | ||
418 | (void)RREG32(PCIE_DATA); | ||
419 | } | ||
420 | |||
421 | |||
422 | /* | 403 | /* |
423 | * Debugfs info | 404 | * Debugfs info |
424 | */ | 405 | */ |
@@ -527,7 +508,7 @@ static const unsigned r500_reg_safe_bm[219] = { | |||
527 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 508 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
528 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, | 509 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, |
529 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 510 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
530 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, | 511 | 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF, |
531 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 512 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
532 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 513 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
533 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 514 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index d258b02aef44..827da0858136 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -674,7 +674,14 @@ omap_i2c_isr(int this_irq, void *dev_id) | |||
674 | 674 | ||
675 | err = 0; | 675 | err = 0; |
676 | complete: | 676 | complete: |
677 | omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); | 677 | /* |
678 | * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be | ||
679 | * acked after the data operation is complete. | ||
680 | * Ref: TRM SWPU114Q Figure 18-31 | ||
681 | */ | ||
682 | omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat & | ||
683 | ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | | ||
684 | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); | ||
678 | 685 | ||
679 | if (stat & OMAP_I2C_STAT_NACK) { | 686 | if (stat & OMAP_I2C_STAT_NACK) { |
680 | err |= OMAP_I2C_STAT_NACK; | 687 | err |= OMAP_I2C_STAT_NACK; |
@@ -687,6 +694,9 @@ complete: | |||
687 | } | 694 | } |
688 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | | 695 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | |
689 | OMAP_I2C_STAT_AL)) { | 696 | OMAP_I2C_STAT_AL)) { |
697 | omap_i2c_ack_stat(dev, stat & | ||
698 | (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | | ||
699 | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); | ||
690 | omap_i2c_complete_cmd(dev, err); | 700 | omap_i2c_complete_cmd(dev, err); |
691 | return IRQ_HANDLED; | 701 | return IRQ_HANDLED; |
692 | } | 702 | } |
@@ -774,7 +784,7 @@ complete: | |||
774 | * memory to the I2C interface. | 784 | * memory to the I2C interface. |
775 | */ | 785 | */ |
776 | 786 | ||
777 | if (cpu_is_omap34xx()) { | 787 | if (dev->rev <= OMAP_I2C_REV_ON_3430) { |
778 | while (!(stat & OMAP_I2C_STAT_XUDF)) { | 788 | while (!(stat & OMAP_I2C_STAT_XUDF)) { |
779 | if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { | 789 | if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { |
780 | omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); | 790 | omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); |
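The i2c-omap change stops blanket-acking every status bit in the common path: [R/X]RDY and [R/X]DR are now acknowledged only after the FIFO access they signal has actually been performed (and explicitly on the ARDY/NACK/AL exit), per the TRM figure cited in the new comment. For reference, omap_i2c_ack_stat() in this driver amounts to a write-back of the selected bits to the status register, roughly:

	/* Roughly what omap_i2c_ack_stat() does (sketch): writing a 1 to a bit
	 * in OMAP_I2C_STAT_REG clears that interrupt source. */
	static inline void omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
	{
		omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
	}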
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 182e711318ba..d2728a28a8db 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -117,7 +117,8 @@ enum stu300_error { | |||
117 | STU300_ERROR_NONE = 0, | 117 | STU300_ERROR_NONE = 0, |
118 | STU300_ERROR_ACKNOWLEDGE_FAILURE, | 118 | STU300_ERROR_ACKNOWLEDGE_FAILURE, |
119 | STU300_ERROR_BUS_ERROR, | 119 | STU300_ERROR_BUS_ERROR, |
120 | STU300_ERROR_ARBITRATION_LOST | 120 | STU300_ERROR_ARBITRATION_LOST, |
121 | STU300_ERROR_UNKNOWN | ||
121 | }; | 122 | }; |
122 | 123 | ||
123 | /* timeout waiting for the controller to respond */ | 124 | /* timeout waiting for the controller to respond */ |
@@ -127,7 +128,7 @@ enum stu300_error { | |||
127 | * The number of address send attempts tried before giving up. | 128 | * The number of address send attempts tried before giving up. |
128 | * If the first one fails it seems like 5 to 8 attempts are required. | 129 | * If the first one fails it seems like 5 to 8 attempts are required. |
129 | */ | 130 | */ |
130 | #define NUM_ADDR_RESEND_ATTEMPTS 10 | 131 | #define NUM_ADDR_RESEND_ATTEMPTS 12 |
131 | 132 | ||
132 | /* I2C clock speed, in Hz 0-400kHz*/ | 133 | /* I2C clock speed, in Hz 0-400kHz*/ |
133 | static unsigned int scl_frequency = 100000; | 134 | static unsigned int scl_frequency = 100000; |
@@ -149,6 +150,7 @@ module_param(scl_frequency, uint, 0644); | |||
149 | * @msg_index: index of current message | 150 | * @msg_index: index of current message |
150 | * @msg_len: length of current message | 151 | * @msg_len: length of current message |
151 | */ | 152 | */ |
153 | |||
152 | struct stu300_dev { | 154 | struct stu300_dev { |
153 | struct platform_device *pdev; | 155 | struct platform_device *pdev; |
154 | struct i2c_adapter adapter; | 156 | struct i2c_adapter adapter; |
@@ -188,6 +190,27 @@ static inline u32 stu300_r8(void __iomem *address) | |||
188 | return readl(address) & 0x000000FFU; | 190 | return readl(address) & 0x000000FFU; |
189 | } | 191 | } |
190 | 192 | ||
193 | static void stu300_irq_enable(struct stu300_dev *dev) | ||
194 | { | ||
195 | u32 val; | ||
196 | val = stu300_r8(dev->virtbase + I2C_CR); | ||
197 | val |= I2C_CR_INTERRUPT_ENABLE; | ||
198 | /* Twice paranoia (possible HW glitch) */ | ||
199 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
200 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
201 | } | ||
202 | |||
203 | static void stu300_irq_disable(struct stu300_dev *dev) | ||
204 | { | ||
205 | u32 val; | ||
206 | val = stu300_r8(dev->virtbase + I2C_CR); | ||
207 | val &= ~I2C_CR_INTERRUPT_ENABLE; | ||
208 | /* Twice paranoia (possible HW glitch) */ | ||
209 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
210 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
211 | } | ||
212 | |||
213 | |||
191 | /* | 214 | /* |
192 | * Tells whether a certain event or events occurred in | 215 | * Tells whether a certain event or events occurred in |
193 | * response to a command. The events represent states in | 216 | * response to a command. The events represent states in |
@@ -196,9 +219,10 @@ static inline u32 stu300_r8(void __iomem *address) | |||
196 | * documentation and can only be treated as abstract state | 219 | * documentation and can only be treated as abstract state |
197 | * machine states. | 220 | * machine states. |
198 | * | 221 | * |
199 | * @ret 0 = event has not occurred, any other value means | 222 | * @ret 0 = event has not occurred or unknown error, any |
200 | * the event occurred. | 223 | * other value means the correct event occurred or an error. |
201 | */ | 224 | */ |
225 | |||
202 | static int stu300_event_occurred(struct stu300_dev *dev, | 226 | static int stu300_event_occurred(struct stu300_dev *dev, |
203 | enum stu300_event mr_event) { | 227 | enum stu300_event mr_event) { |
204 | u32 status1; | 228 | u32 status1; |
@@ -206,11 +230,28 @@ static int stu300_event_occurred(struct stu300_dev *dev, | |||
206 | 230 | ||
207 | /* What event happened? */ | 231 | /* What event happened? */ |
208 | status1 = stu300_r8(dev->virtbase + I2C_SR1); | 232 | status1 = stu300_r8(dev->virtbase + I2C_SR1); |
233 | |||
209 | if (!(status1 & I2C_SR1_EVF_IND)) | 234 | if (!(status1 & I2C_SR1_EVF_IND)) |
210 | /* No event at all */ | 235 | /* No event at all */ |
211 | return 0; | 236 | return 0; |
237 | |||
212 | status2 = stu300_r8(dev->virtbase + I2C_SR2); | 238 | status2 = stu300_r8(dev->virtbase + I2C_SR2); |
213 | 239 | ||
240 | /* Block any multiple interrupts */ | ||
241 | stu300_irq_disable(dev); | ||
242 | |||
243 | /* Check for errors first */ | ||
244 | if (status2 & I2C_SR2_AF_IND) { | ||
245 | dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; | ||
246 | return 1; | ||
247 | } else if (status2 & I2C_SR2_BERR_IND) { | ||
248 | dev->cmd_err = STU300_ERROR_BUS_ERROR; | ||
249 | return 1; | ||
250 | } else if (status2 & I2C_SR2_ARLO_IND) { | ||
251 | dev->cmd_err = STU300_ERROR_ARBITRATION_LOST; | ||
252 | return 1; | ||
253 | } | ||
254 | |||
214 | switch (mr_event) { | 255 | switch (mr_event) { |
215 | case STU300_EVENT_1: | 256 | case STU300_EVENT_1: |
216 | if (status1 & I2C_SR1_ADSL_IND) | 257 | if (status1 & I2C_SR1_ADSL_IND) |
@@ -221,10 +262,6 @@ static int stu300_event_occurred(struct stu300_dev *dev, | |||
221 | case STU300_EVENT_7: | 262 | case STU300_EVENT_7: |
222 | case STU300_EVENT_8: | 263 | case STU300_EVENT_8: |
223 | if (status1 & I2C_SR1_BTF_IND) { | 264 | if (status1 & I2C_SR1_BTF_IND) { |
224 | if (status2 & I2C_SR2_AF_IND) | ||
225 | dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; | ||
226 | else if (status2 & I2C_SR2_BERR_IND) | ||
227 | dev->cmd_err = STU300_ERROR_BUS_ERROR; | ||
228 | return 1; | 265 | return 1; |
229 | } | 266 | } |
230 | break; | 267 | break; |
@@ -240,8 +277,6 @@ static int stu300_event_occurred(struct stu300_dev *dev, | |||
240 | case STU300_EVENT_6: | 277 | case STU300_EVENT_6: |
241 | if (status2 & I2C_SR2_ENDAD_IND) { | 278 | if (status2 & I2C_SR2_ENDAD_IND) { |
242 | /* First check for any errors */ | 279 | /* First check for any errors */ |
243 | if (status2 & I2C_SR2_AF_IND) | ||
244 | dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; | ||
245 | return 1; | 280 | return 1; |
246 | } | 281 | } |
247 | break; | 282 | break; |
@@ -252,8 +287,15 @@ static int stu300_event_occurred(struct stu300_dev *dev, | |||
252 | default: | 287 | default: |
253 | break; | 288 | break; |
254 | } | 289 | } |
255 | if (status2 & I2C_SR2_ARLO_IND) | 290 | /* If we get here, we're on thin ice. |
256 | dev->cmd_err = STU300_ERROR_ARBITRATION_LOST; | 291 | * Here we are in a status where we have |
292 | * gotten a response that does not match | ||
293 | * what we requested. | ||
294 | */ | ||
295 | dev->cmd_err = STU300_ERROR_UNKNOWN; | ||
296 | dev_err(&dev->pdev->dev, | ||
297 | "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n", | ||
298 | mr_event, status1, status2); | ||
257 | return 0; | 299 | return 0; |
258 | } | 300 | } |
259 | 301 | ||
@@ -262,21 +304,20 @@ static irqreturn_t stu300_irh(int irq, void *data) | |||
262 | struct stu300_dev *dev = data; | 304 | struct stu300_dev *dev = data; |
263 | int res; | 305 | int res; |
264 | 306 | ||
307 | /* Just make sure that the block is clocked */ | ||
308 | clk_enable(dev->clk); | ||
309 | |||
265 | /* See if this was what we were waiting for */ | 310 | /* See if this was what we were waiting for */ |
266 | spin_lock(&dev->cmd_issue_lock); | 311 | spin_lock(&dev->cmd_issue_lock); |
267 | if (dev->cmd_event != STU300_EVENT_NONE) { | 312 | |
268 | res = stu300_event_occurred(dev, dev->cmd_event); | 313 | res = stu300_event_occurred(dev, dev->cmd_event); |
269 | if (res || dev->cmd_err != STU300_ERROR_NONE) { | 314 | if (res || dev->cmd_err != STU300_ERROR_NONE) |
270 | u32 val; | 315 | complete(&dev->cmd_complete); |
271 | 316 | ||
272 | complete(&dev->cmd_complete); | ||
273 | /* Block any multiple interrupts */ | ||
274 | val = stu300_r8(dev->virtbase + I2C_CR); | ||
275 | val &= ~I2C_CR_INTERRUPT_ENABLE; | ||
276 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
277 | } | ||
278 | } | ||
279 | spin_unlock(&dev->cmd_issue_lock); | 317 | spin_unlock(&dev->cmd_issue_lock); |
318 | |||
319 | clk_disable(dev->clk); | ||
320 | |||
280 | return IRQ_HANDLED; | 321 | return IRQ_HANDLED; |
281 | } | 322 | } |
282 | 323 | ||
@@ -308,7 +349,6 @@ static int stu300_start_and_await_event(struct stu300_dev *dev, | |||
308 | stu300_wr8(cr_value, dev->virtbase + I2C_CR); | 349 | stu300_wr8(cr_value, dev->virtbase + I2C_CR); |
309 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, | 350 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, |
310 | STU300_TIMEOUT); | 351 | STU300_TIMEOUT); |
311 | |||
312 | if (ret < 0) { | 352 | if (ret < 0) { |
313 | dev_err(&dev->pdev->dev, | 353 | dev_err(&dev->pdev->dev, |
314 | "wait_for_completion_interruptible_timeout() " | 354 | "wait_for_completion_interruptible_timeout() " |
@@ -342,7 +382,6 @@ static int stu300_await_event(struct stu300_dev *dev, | |||
342 | enum stu300_event mr_event) | 382 | enum stu300_event mr_event) |
343 | { | 383 | { |
344 | int ret; | 384 | int ret; |
345 | u32 val; | ||
346 | 385 | ||
347 | if (unlikely(irqs_disabled())) { | 386 | if (unlikely(irqs_disabled())) { |
348 | /* TODO: implement polling for this case if need be. */ | 387 | /* TODO: implement polling for this case if need be. */ |
@@ -354,36 +393,18 @@ static int stu300_await_event(struct stu300_dev *dev, | |||
354 | /* Is it already here? */ | 393 | /* Is it already here? */ |
355 | spin_lock_irq(&dev->cmd_issue_lock); | 394 | spin_lock_irq(&dev->cmd_issue_lock); |
356 | dev->cmd_err = STU300_ERROR_NONE; | 395 | dev->cmd_err = STU300_ERROR_NONE; |
357 | if (stu300_event_occurred(dev, mr_event)) { | ||
358 | spin_unlock_irq(&dev->cmd_issue_lock); | ||
359 | goto exit_await_check_err; | ||
360 | } | ||
361 | init_completion(&dev->cmd_complete); | ||
362 | dev->cmd_err = STU300_ERROR_NONE; | ||
363 | dev->cmd_event = mr_event; | 396 | dev->cmd_event = mr_event; |
364 | 397 | ||
365 | /* Turn on the I2C interrupt for current operation */ | 398 | init_completion(&dev->cmd_complete); |
366 | val = stu300_r8(dev->virtbase + I2C_CR); | ||
367 | val |= I2C_CR_INTERRUPT_ENABLE; | ||
368 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
369 | |||
370 | /* Twice paranoia (possible HW glitch) */ | ||
371 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
372 | 399 | ||
373 | /* Check again: is it already here? */ | 400 | /* Turn on the I2C interrupt for current operation */ |
374 | if (unlikely(stu300_event_occurred(dev, mr_event))) { | 401 | stu300_irq_enable(dev); |
375 | /* Disable IRQ again. */ | ||
376 | val &= ~I2C_CR_INTERRUPT_ENABLE; | ||
377 | stu300_wr8(val, dev->virtbase + I2C_CR); | ||
378 | spin_unlock_irq(&dev->cmd_issue_lock); | ||
379 | goto exit_await_check_err; | ||
380 | } | ||
381 | 402 | ||
382 | /* Unlock the command block and wait for the event to occur */ | 403 | /* Unlock the command block and wait for the event to occur */ |
383 | spin_unlock_irq(&dev->cmd_issue_lock); | 404 | spin_unlock_irq(&dev->cmd_issue_lock); |
405 | |||
384 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, | 406 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, |
385 | STU300_TIMEOUT); | 407 | STU300_TIMEOUT); |
386 | |||
387 | if (ret < 0) { | 408 | if (ret < 0) { |
388 | dev_err(&dev->pdev->dev, | 409 | dev_err(&dev->pdev->dev, |
389 | "wait_for_completion_interruptible_timeout()" | 410 | "wait_for_completion_interruptible_timeout()" |
@@ -401,7 +422,6 @@ static int stu300_await_event(struct stu300_dev *dev, | |||
401 | return -ETIMEDOUT; | 422 | return -ETIMEDOUT; |
402 | } | 423 | } |
403 | 424 | ||
404 | exit_await_check_err: | ||
405 | if (dev->cmd_err != STU300_ERROR_NONE) { | 425 | if (dev->cmd_err != STU300_ERROR_NONE) { |
406 | if (mr_event != STU300_EVENT_6) { | 426 | if (mr_event != STU300_EVENT_6) { |
407 | dev_err(&dev->pdev->dev, "controller " | 427 | dev_err(&dev->pdev->dev, "controller " |
@@ -457,18 +477,19 @@ struct stu300_clkset { | |||
457 | }; | 477 | }; |
458 | 478 | ||
459 | static const struct stu300_clkset stu300_clktable[] = { | 479 | static const struct stu300_clkset stu300_clktable[] = { |
460 | { 0, 0xFFU }, | 480 | { 0, 0xFFU }, |
461 | { 2500000, I2C_OAR2_FR_25_10MHZ }, | 481 | { 2500000, I2C_OAR2_FR_25_10MHZ }, |
462 | { 10000000, I2C_OAR2_FR_10_1667MHZ }, | 482 | { 10000000, I2C_OAR2_FR_10_1667MHZ }, |
463 | { 16670000, I2C_OAR2_FR_1667_2667MHZ }, | 483 | { 16670000, I2C_OAR2_FR_1667_2667MHZ }, |
464 | { 26670000, I2C_OAR2_FR_2667_40MHZ }, | 484 | { 26670000, I2C_OAR2_FR_2667_40MHZ }, |
465 | { 40000000, I2C_OAR2_FR_40_5333MHZ }, | 485 | { 40000000, I2C_OAR2_FR_40_5333MHZ }, |
466 | { 53330000, I2C_OAR2_FR_5333_66MHZ }, | 486 | { 53330000, I2C_OAR2_FR_5333_66MHZ }, |
467 | { 66000000, I2C_OAR2_FR_66_80MHZ }, | 487 | { 66000000, I2C_OAR2_FR_66_80MHZ }, |
468 | { 80000000, I2C_OAR2_FR_80_100MHZ }, | 488 | { 80000000, I2C_OAR2_FR_80_100MHZ }, |
469 | { 100000000, 0xFFU }, | 489 | { 100000000, 0xFFU }, |
470 | }; | 490 | }; |
471 | 491 | ||
492 | |||
472 | static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) | 493 | static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) |
473 | { | 494 | { |
474 | 495 | ||
@@ -494,10 +515,10 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) | |||
494 | 515 | ||
495 | if (dev->speed > 100000) | 516 | if (dev->speed > 100000) |
496 | /* Fast Mode I2C */ | 517 | /* Fast Mode I2C */ |
497 | val = ((clkrate/dev->speed)-9)/3; | 518 | val = ((clkrate/dev->speed) - 9)/3 + 1; |
498 | else | 519 | else |
499 | /* Standard Mode I2C */ | 520 | /* Standard Mode I2C */ |
500 | val = ((clkrate/dev->speed)-7)/2; | 521 | val = ((clkrate/dev->speed) - 7)/2 + 1; |
501 | 522 | ||
502 | /* According to spec the divider must be > 2 */ | 523 | /* According to spec the divider must be > 2 */ |
503 | if (val < 0x002) { | 524 | if (val < 0x002) { |
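The "+ 1" added to both divider formulas rounds the divisor up, so integer truncation can only lower the resulting SCL rate, never push it above the requested speed. As a worked example (clock value assumed for illustration), with a 13 MHz block clock: standard mode at 100 kHz gives (13000000/100000 - 7)/2 + 1 = (130 - 7)/2 + 1 = 62 instead of the old 61, and fast mode at 400 kHz gives (32 - 9)/3 + 1 = 8 instead of 7.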
@@ -557,6 +578,7 @@ static int stu300_init_hw(struct stu300_dev *dev) | |||
557 | */ | 578 | */ |
558 | clkrate = clk_get_rate(dev->clk); | 579 | clkrate = clk_get_rate(dev->clk); |
559 | ret = stu300_set_clk(dev, clkrate); | 580 | ret = stu300_set_clk(dev, clkrate); |
581 | |||
560 | if (ret) | 582 | if (ret) |
561 | return ret; | 583 | return ret; |
562 | /* | 584 | /* |
@@ -641,7 +663,6 @@ static int stu300_xfer_msg(struct i2c_adapter *adap, | |||
641 | int attempts = 0; | 663 | int attempts = 0; |
642 | struct stu300_dev *dev = i2c_get_adapdata(adap); | 664 | struct stu300_dev *dev = i2c_get_adapdata(adap); |
643 | 665 | ||
644 | |||
645 | clk_enable(dev->clk); | 666 | clk_enable(dev->clk); |
646 | 667 | ||
647 | /* Remove this if (0) to trace each and every message. */ | 668 | /* Remove this if (0) to trace each and every message. */ |
@@ -715,14 +736,15 @@ static int stu300_xfer_msg(struct i2c_adapter *adap, | |||
715 | 736 | ||
716 | if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) { | 737 | if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) { |
717 | dev_dbg(&dev->pdev->dev, "managed to get address " | 738 | dev_dbg(&dev->pdev->dev, "managed to get address " |
718 | "through after %d attempts\n", attempts); | 739 | "through after %d attempts\n", attempts); |
719 | } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) { | 740 | } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) { |
720 | dev_dbg(&dev->pdev->dev, "I give up, tried %d times " | 741 | dev_dbg(&dev->pdev->dev, "I give up, tried %d times " |
721 | "to resend address.\n", | 742 | "to resend address.\n", |
722 | NUM_ADDR_RESEND_ATTEMPTS); | 743 | NUM_ADDR_RESEND_ATTEMPTS); |
723 | goto exit_disable; | 744 | goto exit_disable; |
724 | } | 745 | } |
725 | 746 | ||
747 | |||
726 | if (msg->flags & I2C_M_RD) { | 748 | if (msg->flags & I2C_M_RD) { |
727 | /* READ: we read the actual bytes one at a time */ | 749 | /* READ: we read the actual bytes one at a time */ |
728 | for (i = 0; i < msg->len; i++) { | 750 | for (i = 0; i < msg->len; i++) { |
@@ -804,8 +826,10 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, | |||
804 | { | 826 | { |
805 | int ret = -1; | 827 | int ret = -1; |
806 | int i; | 828 | int i; |
829 | |||
807 | struct stu300_dev *dev = i2c_get_adapdata(adap); | 830 | struct stu300_dev *dev = i2c_get_adapdata(adap); |
808 | dev->msg_len = num; | 831 | dev->msg_len = num; |
832 | |||
809 | for (i = 0; i < num; i++) { | 833 | for (i = 0; i < num; i++) { |
810 | /* | 834 | /* |
811 | * Another driver appears to send stop for each message, | 835 | * Another driver appears to send stop for each message, |
@@ -817,6 +841,7 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, | |||
817 | dev->msg_index = i; | 841 | dev->msg_index = i; |
818 | 842 | ||
819 | ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1))); | 843 | ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1))); |
844 | |||
820 | if (ret != 0) { | 845 | if (ret != 0) { |
821 | num = ret; | 846 | num = ret; |
822 | break; | 847 | break; |
@@ -845,6 +870,7 @@ stu300_probe(struct platform_device *pdev) | |||
845 | struct resource *res; | 870 | struct resource *res; |
846 | int bus_nr; | 871 | int bus_nr; |
847 | int ret = 0; | 872 | int ret = 0; |
873 | char clk_name[] = "I2C0"; | ||
848 | 874 | ||
849 | dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL); | 875 | dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL); |
850 | if (!dev) { | 876 | if (!dev) { |
@@ -854,7 +880,8 @@ stu300_probe(struct platform_device *pdev) | |||
854 | } | 880 | } |
855 | 881 | ||
856 | bus_nr = pdev->id; | 882 | bus_nr = pdev->id; |
857 | dev->clk = clk_get(&pdev->dev, NULL); | 883 | clk_name[3] += (char)bus_nr; |
884 | dev->clk = clk_get(&pdev->dev, clk_name); | ||
858 | if (IS_ERR(dev->clk)) { | 885 | if (IS_ERR(dev->clk)) { |
859 | ret = PTR_ERR(dev->clk); | 886 | ret = PTR_ERR(dev->clk); |
860 | dev_err(&pdev->dev, "could not retrieve i2c bus clock\n"); | 887 | dev_err(&pdev->dev, "could not retrieve i2c bus clock\n"); |
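stu300_probe() now looks up the bus clock by a per-bus name rather than a NULL name: clk_name starts as "I2C0" and clk_name[3] += bus_nr turns it into "I2C1", "I2C2", ... for pdev->id 1, 2, and so on. This assumes the platform's clock table registers the block clocks under exactly those names; the clock definitions themselves are platform data and are not part of this diff.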
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c index 923cbfe259d3..6396c3ad3252 100644 --- a/drivers/ide/atiixp.c +++ b/drivers/ide/atiixp.c | |||
@@ -177,6 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = { | |||
177 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 }, | 177 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 }, |
178 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 }, | 178 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 }, |
179 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 }, | 179 | { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 }, |
180 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 }, | ||
180 | { 0, }, | 181 | { 0, }, |
181 | }; | 182 | }; |
182 | MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl); | 183 | MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl); |
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 527908ff298c..063b933d864a 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = { | |||
408 | PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), | 408 | PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), |
409 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), | 409 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), |
410 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), | 410 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), |
411 | PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), | ||
411 | PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), | 412 | PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), |
412 | PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), | 413 | PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), |
413 | PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), | 414 | PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 8f9509e1ebf7..55d093a36ae4 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) | |||
362 | * In either case, must tell the provider to reject. | 362 | * In either case, must tell the provider to reject. |
363 | */ | 363 | */ |
364 | cm_id_priv->state = IW_CM_STATE_DESTROYING; | 364 | cm_id_priv->state = IW_CM_STATE_DESTROYING; |
365 | cm_id->device->iwcm->reject(cm_id, NULL, 0); | ||
365 | break; | 366 | break; |
366 | case IW_CM_STATE_CONN_SENT: | 367 | case IW_CM_STATE_CONN_SENT: |
367 | case IW_CM_STATE_DESTROYING: | 368 | case IW_CM_STATE_DESTROYING: |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index de922a04ca2d..7522008fda86 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | 3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. |
4 | * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. | 4 | * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. |
5 | * Copyright (c) 2009 HNR Consulting. All rights reserved. | ||
5 | * | 6 | * |
6 | * This software is available to you under a choice of one of two | 7 | * This software is available to you under a choice of one of two |
7 | * licenses. You may choose to be licensed under the terms of the GNU | 8 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API"); | |||
45 | MODULE_AUTHOR("Hal Rosenstock"); | 46 | MODULE_AUTHOR("Hal Rosenstock"); |
46 | MODULE_AUTHOR("Sean Hefty"); | 47 | MODULE_AUTHOR("Sean Hefty"); |
47 | 48 | ||
49 | int mad_sendq_size = IB_MAD_QP_SEND_SIZE; | ||
50 | int mad_recvq_size = IB_MAD_QP_RECV_SIZE; | ||
51 | |||
52 | module_param_named(send_queue_size, mad_sendq_size, int, 0444); | ||
53 | MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); | ||
54 | module_param_named(recv_queue_size, mad_recvq_size, int, 0444); | ||
55 | MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); | ||
56 | |||
48 | static struct kmem_cache *ib_mad_cache; | 57 | static struct kmem_cache *ib_mad_cache; |
49 | 58 | ||
50 | static struct list_head ib_mad_port_list; | 59 | static struct list_head ib_mad_port_list; |
51 | static u32 ib_mad_client_id = 0; | 60 | static u32 ib_mad_client_id = 0; |
52 | 61 | ||
53 | /* Port list lock */ | 62 | /* Port list lock */ |
54 | static spinlock_t ib_mad_port_list_lock; | 63 | static DEFINE_SPINLOCK(ib_mad_port_list_lock); |
55 | |||
56 | 64 | ||
57 | /* Forward declarations */ | 65 | /* Forward declarations */ |
58 | static int method_in_use(struct ib_mad_mgmt_method_table **method, | 66 | static int method_in_use(struct ib_mad_mgmt_method_table **method, |
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |||
1974 | unsigned long delay; | 1982 | unsigned long delay; |
1975 | 1983 | ||
1976 | if (list_empty(&mad_agent_priv->wait_list)) { | 1984 | if (list_empty(&mad_agent_priv->wait_list)) { |
1977 | cancel_delayed_work(&mad_agent_priv->timed_work); | 1985 | __cancel_delayed_work(&mad_agent_priv->timed_work); |
1978 | } else { | 1986 | } else { |
1979 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | 1987 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, |
1980 | struct ib_mad_send_wr_private, | 1988 | struct ib_mad_send_wr_private, |
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |||
1983 | if (time_after(mad_agent_priv->timeout, | 1991 | if (time_after(mad_agent_priv->timeout, |
1984 | mad_send_wr->timeout)) { | 1992 | mad_send_wr->timeout)) { |
1985 | mad_agent_priv->timeout = mad_send_wr->timeout; | 1993 | mad_agent_priv->timeout = mad_send_wr->timeout; |
1986 | cancel_delayed_work(&mad_agent_priv->timed_work); | 1994 | __cancel_delayed_work(&mad_agent_priv->timed_work); |
1987 | delay = mad_send_wr->timeout - jiffies; | 1995 | delay = mad_send_wr->timeout - jiffies; |
1988 | if ((long)delay <= 0) | 1996 | if ((long)delay <= 0) |
1989 | delay = 1; | 1997 | delay = 1; |
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) | |||
2023 | 2031 | ||
2024 | /* Reschedule a work item if we have a shorter timeout */ | 2032 | /* Reschedule a work item if we have a shorter timeout */ |
2025 | if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { | 2033 | if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { |
2026 | cancel_delayed_work(&mad_agent_priv->timed_work); | 2034 | __cancel_delayed_work(&mad_agent_priv->timed_work); |
2027 | queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, | 2035 | queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, |
2028 | &mad_agent_priv->timed_work, delay); | 2036 | &mad_agent_priv->timed_work, delay); |
2029 | } | 2037 | } |
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info, | |||
2736 | qp_init_attr.send_cq = qp_info->port_priv->cq; | 2744 | qp_init_attr.send_cq = qp_info->port_priv->cq; |
2737 | qp_init_attr.recv_cq = qp_info->port_priv->cq; | 2745 | qp_init_attr.recv_cq = qp_info->port_priv->cq; |
2738 | qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; | 2746 | qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; |
2739 | qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE; | 2747 | qp_init_attr.cap.max_send_wr = mad_sendq_size; |
2740 | qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE; | 2748 | qp_init_attr.cap.max_recv_wr = mad_recvq_size; |
2741 | qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; | 2749 | qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; |
2742 | qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; | 2750 | qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; |
2743 | qp_init_attr.qp_type = qp_type; | 2751 | qp_init_attr.qp_type = qp_type; |
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info, | |||
2752 | goto error; | 2760 | goto error; |
2753 | } | 2761 | } |
2754 | /* Use minimum queue sizes unless the CQ is resized */ | 2762 | /* Use minimum queue sizes unless the CQ is resized */ |
2755 | qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE; | 2763 | qp_info->send_queue.max_active = mad_sendq_size; |
2756 | qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE; | 2764 | qp_info->recv_queue.max_active = mad_recvq_size; |
2757 | return 0; | 2765 | return 0; |
2758 | 2766 | ||
2759 | error: | 2767 | error: |
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device, | |||
2792 | init_mad_qp(port_priv, &port_priv->qp_info[0]); | 2800 | init_mad_qp(port_priv, &port_priv->qp_info[0]); |
2793 | init_mad_qp(port_priv, &port_priv->qp_info[1]); | 2801 | init_mad_qp(port_priv, &port_priv->qp_info[1]); |
2794 | 2802 | ||
2795 | cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; | 2803 | cq_size = (mad_sendq_size + mad_recvq_size) * 2; |
2796 | port_priv->cq = ib_create_cq(port_priv->device, | 2804 | port_priv->cq = ib_create_cq(port_priv->device, |
2797 | ib_mad_thread_completion_handler, | 2805 | ib_mad_thread_completion_handler, |
2798 | NULL, port_priv, cq_size, 0); | 2806 | NULL, port_priv, cq_size, 0); |
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void) | |||
2984 | { | 2992 | { |
2985 | int ret; | 2993 | int ret; |
2986 | 2994 | ||
2987 | spin_lock_init(&ib_mad_port_list_lock); | 2995 | mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); |
2996 | mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); | ||
2997 | |||
2998 | mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); | ||
2999 | mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); | ||
2988 | 3000 | ||
2989 | ib_mad_cache = kmem_cache_create("ib_mad", | 3001 | ib_mad_cache = kmem_cache_create("ib_mad", |
2990 | sizeof(struct ib_mad_private), | 3002 | sizeof(struct ib_mad_private), |
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void) | |||
3021 | 3033 | ||
3022 | module_init(ib_mad_init_module); | 3034 | module_init(ib_mad_init_module); |
3023 | module_exit(ib_mad_cleanup_module); | 3035 | module_exit(ib_mad_cleanup_module); |
3024 | |||
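The two new ib_mad module parameters are clamped in ib_mad_init_module() so that a bad value cannot produce an unusably small or oversized MAD QP; the min()/max() pair is the open-coded form of the kernel's clamp() helper, i.e. roughly:

	/* Equivalent to the min()/max() pairs in ib_mad_init_module() (sketch). */
	mad_recvq_size = clamp(mad_recvq_size, IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = clamp(mad_sendq_size, IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE);

Because they are declared with module_param_named(..., 0444), they can be set at load time (for example as ib_mad.send_queue_size= on the kernel command line or as modprobe options) but remain read-only through sysfs afterwards.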
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 05ce331733b0..9430ab4969c5 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | 3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. |
4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | 4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
5 | * Copyright (c) 2009 HNR Consulting. All rights reserved. | ||
5 | * | 6 | * |
6 | * This software is available to you under a choice of one of two | 7 | * This software is available to you under a choice of one of two |
7 | * licenses. You may choose to be licensed under the terms of the GNU | 8 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -49,6 +50,8 @@ | |||
49 | /* QP and CQ parameters */ | 50 | /* QP and CQ parameters */ |
50 | #define IB_MAD_QP_SEND_SIZE 128 | 51 | #define IB_MAD_QP_SEND_SIZE 128 |
51 | #define IB_MAD_QP_RECV_SIZE 512 | 52 | #define IB_MAD_QP_RECV_SIZE 512 |
53 | #define IB_MAD_QP_MIN_SIZE 64 | ||
54 | #define IB_MAD_QP_MAX_SIZE 8192 | ||
52 | #define IB_MAD_SEND_REQ_MAX_SG 2 | 55 | #define IB_MAD_SEND_REQ_MAX_SG 2 |
53 | #define IB_MAD_RECV_REQ_MAX_SG 1 | 56 | #define IB_MAD_RECV_REQ_MAX_SG 1 |
54 | 57 | ||
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 107f170c57cd..8d82ba171353 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c | |||
@@ -106,6 +106,8 @@ struct mcast_group { | |||
106 | struct ib_sa_query *query; | 106 | struct ib_sa_query *query; |
107 | int query_id; | 107 | int query_id; |
108 | u16 pkey_index; | 108 | u16 pkey_index; |
109 | u8 leave_state; | ||
110 | int retries; | ||
109 | }; | 111 | }; |
110 | 112 | ||
111 | struct mcast_member { | 113 | struct mcast_member { |
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state) | |||
350 | 352 | ||
351 | rec = group->rec; | 353 | rec = group->rec; |
352 | rec.join_state = leave_state; | 354 | rec.join_state = leave_state; |
355 | group->leave_state = leave_state; | ||
353 | 356 | ||
354 | ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, | 357 | ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, |
355 | port->port_num, IB_SA_METHOD_DELETE, &rec, | 358 | port->port_num, IB_SA_METHOD_DELETE, &rec, |
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, | |||
542 | { | 545 | { |
543 | struct mcast_group *group = context; | 546 | struct mcast_group *group = context; |
544 | 547 | ||
545 | mcast_work_handler(&group->work); | 548 | if (status && group->retries > 0 && |
549 | !send_leave(group, group->leave_state)) | ||
550 | group->retries--; | ||
551 | else | ||
552 | mcast_work_handler(&group->work); | ||
546 | } | 553 | } |
547 | 554 | ||
548 | static struct mcast_group *acquire_group(struct mcast_port *port, | 555 | static struct mcast_group *acquire_group(struct mcast_port *port, |
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port, | |||
565 | if (!group) | 572 | if (!group) |
566 | return NULL; | 573 | return NULL; |
567 | 574 | ||
575 | group->retries = 3; | ||
568 | group->port = port; | 576 | group->port = port; |
569 | group->rec.mgid = *mgid; | 577 | group->rec.mgid = *mgid; |
570 | group->pkey_index = MCAST_INVALID_PKEY_INDEX; | 578 | group->pkey_index = MCAST_INVALID_PKEY_INDEX; |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 1865049e80f7..82543716d59e 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -109,10 +109,10 @@ static struct ib_client sa_client = { | |||
109 | .remove = ib_sa_remove_one | 109 | .remove = ib_sa_remove_one |
110 | }; | 110 | }; |
111 | 111 | ||
112 | static spinlock_t idr_lock; | 112 | static DEFINE_SPINLOCK(idr_lock); |
113 | static DEFINE_IDR(query_idr); | 113 | static DEFINE_IDR(query_idr); |
114 | 114 | ||
115 | static spinlock_t tid_lock; | 115 | static DEFINE_SPINLOCK(tid_lock); |
116 | static u32 tid; | 116 | static u32 tid; |
117 | 117 | ||
118 | #define PATH_REC_FIELD(field) \ | 118 | #define PATH_REC_FIELD(field) \ |
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void) | |||
1077 | { | 1077 | { |
1078 | int ret; | 1078 | int ret; |
1079 | 1079 | ||
1080 | spin_lock_init(&idr_lock); | ||
1081 | spin_lock_init(&tid_lock); | ||
1082 | |||
1083 | get_random_bytes(&tid, sizeof tid); | 1080 | get_random_bytes(&tid, sizeof tid); |
1084 | 1081 | ||
1085 | ret = ib_register_client(&sa_client); | 1082 | ret = ib_register_client(&sa_client); |
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 87236753bce9..5855e4405d9b 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c | |||
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, | |||
52 | hop_cnt = smp->hop_cnt; | 52 | hop_cnt = smp->hop_cnt; |
53 | 53 | ||
54 | /* See section 14.2.2.2, Vol 1 IB spec */ | 54 | /* See section 14.2.2.2, Vol 1 IB spec */ |
55 | /* C14-6 -- valid hop_cnt values are from 0 to 63 */ | ||
56 | if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) | ||
57 | return IB_SMI_DISCARD; | ||
58 | |||
55 | if (!ib_get_smp_direction(smp)) { | 59 | if (!ib_get_smp_direction(smp)) { |
56 | /* C14-9:1 */ | 60 | /* C14-9:1 */ |
57 | if (hop_cnt && hop_ptr == 0) { | 61 | if (hop_cnt && hop_ptr == 0) { |
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, | |||
133 | hop_cnt = smp->hop_cnt; | 137 | hop_cnt = smp->hop_cnt; |
134 | 138 | ||
135 | /* See section 14.2.2.2, Vol 1 IB spec */ | 139 | /* See section 14.2.2.2, Vol 1 IB spec */ |
140 | /* C14-6 -- valid hop_cnt values are from 0 to 63 */ | ||
141 | if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) | ||
142 | return IB_SMI_DISCARD; | ||
143 | |||
136 | if (!ib_get_smp_direction(smp)) { | 144 | if (!ib_get_smp_direction(smp)) { |
137 | /* C14-9:1 -- sender should have incremented hop_ptr */ | 145 | /* C14-9:1 -- sender should have incremented hop_ptr */ |
138 | if (hop_cnt && hop_ptr == 0) | 146 | if (hop_cnt && hop_ptr == 0) |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index eb36a81dd09b..d3fff9e008a3 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr); | |||
73 | DEFINE_IDR(ib_uverbs_qp_idr); | 73 | DEFINE_IDR(ib_uverbs_qp_idr); |
74 | DEFINE_IDR(ib_uverbs_srq_idr); | 74 | DEFINE_IDR(ib_uverbs_srq_idr); |
75 | 75 | ||
76 | static spinlock_t map_lock; | 76 | static DEFINE_SPINLOCK(map_lock); |
77 | static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES]; | 77 | static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES]; |
78 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); | 78 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); |
79 | 79 | ||
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
584 | 584 | ||
585 | if (hdr.command < 0 || | 585 | if (hdr.command < 0 || |
586 | hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || | 586 | hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || |
587 | !uverbs_cmd_table[hdr.command] || | 587 | !uverbs_cmd_table[hdr.command]) |
588 | !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) | ||
589 | return -EINVAL; | 588 | return -EINVAL; |
590 | 589 | ||
591 | if (!file->ucontext && | 590 | if (!file->ucontext && |
592 | hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) | 591 | hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) |
593 | return -EINVAL; | 592 | return -EINVAL; |
594 | 593 | ||
594 | if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) | ||
595 | return -ENOSYS; | ||
596 | |||
595 | return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, | 597 | return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, |
596 | hdr.in_words * 4, hdr.out_words * 4); | 598 | hdr.in_words * 4, hdr.out_words * 4); |
597 | } | 599 | } |
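Splitting the check lets userspace tell a malformed request (EINVAL) apart from a verb that is well-formed but simply absent from this device's uverbs_cmd_mask (ENOSYS). From the caller's side, as a sketch:

	/* Userspace view (sketch): cmd_buf/cmd_len are the usual uverbs command
	 * header plus payload written to the uverbs char device. */
	ssize_t n = write(uverbs_fd, cmd_buf, cmd_len);
	if (n < 0 && errno == ENOSYS)
		;	/* command exists but this HCA does not support it */
	else if (n < 0 && errno == EINVAL)
		;	/* unknown or malformed command */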
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void) | |||
836 | { | 838 | { |
837 | int ret; | 839 | int ret; |
838 | 840 | ||
839 | spin_lock_init(&map_lock); | ||
840 | |||
841 | ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, | 841 | ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, |
842 | "infiniband_verbs"); | 842 | "infiniband_verbs"); |
843 | if (ret) { | 843 | if (ret) { |
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index 8c5d2842fbb5..c61fd2b4a556 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c | |||
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table); | |||
86 | 86 | ||
87 | static void c2_print_macaddr(struct net_device *netdev) | 87 | static void c2_print_macaddr(struct net_device *netdev) |
88 | { | 88 | { |
89 | pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, " | 89 | pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq); |
90 | "IRQ %u\n", netdev->name, | ||
91 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | ||
92 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5], | ||
93 | netdev->irq); | ||
94 | } | 90 | } |
95 | 91 | ||
96 | static void c2_set_rxbufsize(struct c2_port *c2_port) | 92 | static void c2_set_rxbufsize(struct c2_port *c2_port) |
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index f1948fad85d7..ad723bd8bf49 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev) | |||
780 | /* Register pseudo network device */ | 780 | /* Register pseudo network device */ |
781 | dev->pseudo_netdev = c2_pseudo_netdev_init(dev); | 781 | dev->pseudo_netdev = c2_pseudo_netdev_init(dev); |
782 | if (!dev->pseudo_netdev) | 782 | if (!dev->pseudo_netdev) |
783 | goto out3; | 783 | goto out; |
784 | 784 | ||
785 | ret = register_netdev(dev->pseudo_netdev); | 785 | ret = register_netdev(dev->pseudo_netdev); |
786 | if (ret) | 786 | if (ret) |
787 | goto out2; | 787 | goto out_free_netdev; |
788 | 788 | ||
789 | pr_debug("%s:%u\n", __func__, __LINE__); | 789 | pr_debug("%s:%u\n", __func__, __LINE__); |
790 | strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); | 790 | strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); |
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev) | |||
851 | dev->ibdev.post_recv = c2_post_receive; | 851 | dev->ibdev.post_recv = c2_post_receive; |
852 | 852 | ||
853 | dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); | 853 | dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); |
854 | if (dev->ibdev.iwcm == NULL) { | ||
855 | ret = -ENOMEM; | ||
856 | goto out_unregister_netdev; | ||
857 | } | ||
854 | dev->ibdev.iwcm->add_ref = c2_add_ref; | 858 | dev->ibdev.iwcm->add_ref = c2_add_ref; |
855 | dev->ibdev.iwcm->rem_ref = c2_rem_ref; | 859 | dev->ibdev.iwcm->rem_ref = c2_rem_ref; |
856 | dev->ibdev.iwcm->get_qp = c2_get_qp; | 860 | dev->ibdev.iwcm->get_qp = c2_get_qp; |
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev) | |||
862 | 866 | ||
863 | ret = ib_register_device(&dev->ibdev); | 867 | ret = ib_register_device(&dev->ibdev); |
864 | if (ret) | 868 | if (ret) |
865 | goto out1; | 869 | goto out_free_iwcm; |
866 | 870 | ||
867 | for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) { | 871 | for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) { |
868 | ret = device_create_file(&dev->ibdev.dev, | 872 | ret = device_create_file(&dev->ibdev.dev, |
869 | c2_dev_attributes[i]); | 873 | c2_dev_attributes[i]); |
870 | if (ret) | 874 | if (ret) |
871 | goto out0; | 875 | goto out_unregister_ibdev; |
872 | } | 876 | } |
873 | goto out3; | 877 | goto out; |
874 | 878 | ||
875 | out0: | 879 | out_unregister_ibdev: |
876 | ib_unregister_device(&dev->ibdev); | 880 | ib_unregister_device(&dev->ibdev); |
877 | out1: | 881 | out_free_iwcm: |
882 | kfree(dev->ibdev.iwcm); | ||
883 | out_unregister_netdev: | ||
878 | unregister_netdev(dev->pseudo_netdev); | 884 | unregister_netdev(dev->pseudo_netdev); |
879 | out2: | 885 | out_free_netdev: |
880 | free_netdev(dev->pseudo_netdev); | 886 | free_netdev(dev->pseudo_netdev); |
881 | out3: | 887 | out: |
882 | pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret); | 888 | pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret); |
883 | return ret; | 889 | return ret; |
884 | } | 890 | } |
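Renaming the unwind labels from out0..out3 to descriptive names (out_free_iwcm, out_unregister_netdev, ...) makes the teardown order self-documenting and lets the newly added iwcm allocation-failure path slot in without renumbering anything. The general shape of the pattern, as a sketch rather than code from this driver:

	/* Generic descriptive-label unwind (sketch). */
	ret = setup_a();
	if (ret)
		goto out;
	ret = setup_b();
	if (ret)
		goto out_undo_a;
	return 0;
out_undo_a:
	undo_a();
out:
	return ret;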
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 62f9cf2f94ec..72ed3396b721 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) | |||
852 | wqe->qpcaps = attr->qpcaps; | 852 | wqe->qpcaps = attr->qpcaps; |
853 | wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); | 853 | wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); |
854 | wqe->rqe_count = cpu_to_be16(attr->rqe_count); | 854 | wqe->rqe_count = cpu_to_be16(attr->rqe_count); |
855 | wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type)); | 855 | wqe->flags_rtr_type = cpu_to_be16(attr->flags | |
856 | V_RTR_TYPE(attr->rtr_type) | | ||
857 | V_CHAN(attr->chan)); | ||
856 | wqe->ord = cpu_to_be32(attr->ord); | 858 | wqe->ord = cpu_to_be32(attr->ord); |
857 | wqe->ird = cpu_to_be32(attr->ird); | 859 | wqe->ird = cpu_to_be32(attr->ird); |
858 | wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); | 860 | wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); |
@@ -1032,6 +1034,7 @@ err3: | |||
1032 | err2: | 1034 | err2: |
1033 | cxio_hal_destroy_ctrl_qp(rdev_p); | 1035 | cxio_hal_destroy_ctrl_qp(rdev_p); |
1034 | err1: | 1036 | err1: |
1037 | rdev_p->t3cdev_p->ulp = NULL; | ||
1035 | list_del(&rdev_p->entry); | 1038 | list_del(&rdev_p->entry); |
1036 | return err; | 1039 | return err; |
1037 | } | 1040 | } |
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index 32e3b1461d81..a197a5b7ac7f 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h | |||
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types { | |||
327 | #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE) | 327 | #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE) |
328 | #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE) | 328 | #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE) |
329 | 329 | ||
330 | #define S_CHAN 4 | ||
331 | #define M_CHAN 0x3 | ||
332 | #define V_CHAN(x) ((x) << S_CHAN) | ||
333 | #define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN) | ||
334 | |||
330 | struct t3_rdma_init_attr { | 335 | struct t3_rdma_init_attr { |
331 | u32 tid; | 336 | u32 tid; |
332 | u32 qpid; | 337 | u32 qpid; |
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr { | |||
346 | u16 flags; | 351 | u16 flags; |
347 | u16 rqe_count; | 352 | u16 rqe_count; |
348 | u32 irs; | 353 | u32 irs; |
354 | u32 chan; | ||
349 | }; | 355 | }; |
350 | 356 | ||
351 | struct t3_rdma_init_wr { | 357 | struct t3_rdma_init_wr { |
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index 26fc0a4eaa74..b0ea0105ddf6 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c | |||
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS]; | |||
51 | 51 | ||
52 | static void open_rnic_dev(struct t3cdev *); | 52 | static void open_rnic_dev(struct t3cdev *); |
53 | static void close_rnic_dev(struct t3cdev *); | 53 | static void close_rnic_dev(struct t3cdev *); |
54 | static void iwch_err_handler(struct t3cdev *, u32, u32); | 54 | static void iwch_event_handler(struct t3cdev *, u32, u32); |
55 | 55 | ||
56 | struct cxgb3_client t3c_client = { | 56 | struct cxgb3_client t3c_client = { |
57 | .name = "iw_cxgb3", | 57 | .name = "iw_cxgb3", |
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = { | |||
59 | .remove = close_rnic_dev, | 59 | .remove = close_rnic_dev, |
60 | .handlers = t3c_handlers, | 60 | .handlers = t3c_handlers, |
61 | .redirect = iwch_ep_redirect, | 61 | .redirect = iwch_ep_redirect, |
62 | .err_handler = iwch_err_handler | 62 | .event_handler = iwch_event_handler |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static LIST_HEAD(dev_list); | 65 | static LIST_HEAD(dev_list); |
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp) | |||
105 | static void open_rnic_dev(struct t3cdev *tdev) | 105 | static void open_rnic_dev(struct t3cdev *tdev) |
106 | { | 106 | { |
107 | struct iwch_dev *rnicp; | 107 | struct iwch_dev *rnicp; |
108 | static int vers_printed; | ||
109 | 108 | ||
110 | PDBG("%s t3cdev %p\n", __func__, tdev); | 109 | PDBG("%s t3cdev %p\n", __func__, tdev); |
111 | if (!vers_printed++) | 110 | printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n", |
112 | printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n", | ||
113 | DRV_VERSION); | 111 | DRV_VERSION); |
114 | rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); | 112 | rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); |
115 | if (!rnicp) { | 113 | if (!rnicp) { |
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev) | |||
162 | mutex_unlock(&dev_mutex); | 160 | mutex_unlock(&dev_mutex); |
163 | } | 161 | } |
164 | 162 | ||
165 | static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) | 163 | static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) |
166 | { | 164 | { |
167 | struct cxio_rdev *rdev = tdev->ulp; | 165 | struct cxio_rdev *rdev = tdev->ulp; |
168 | struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev); | 166 | struct iwch_dev *rnicp; |
169 | struct ib_event event; | 167 | struct ib_event event; |
168 | u32 portnum = port_id + 1; | ||
170 | 169 | ||
171 | if (status == OFFLOAD_STATUS_DOWN) { | 170 | if (!rdev) |
171 | return; | ||
172 | rnicp = rdev_to_iwch_dev(rdev); | ||
173 | switch (evt) { | ||
174 | case OFFLOAD_STATUS_DOWN: { | ||
172 | rdev->flags = CXIO_ERROR_FATAL; | 175 | rdev->flags = CXIO_ERROR_FATAL; |
173 | |||
174 | event.device = &rnicp->ibdev; | ||
175 | event.event = IB_EVENT_DEVICE_FATAL; | 176 | event.event = IB_EVENT_DEVICE_FATAL; |
176 | event.element.port_num = 0; | 177 | break; |
177 | ib_dispatch_event(&event); | 178 | } |
179 | case OFFLOAD_PORT_DOWN: { | ||
180 | event.event = IB_EVENT_PORT_ERR; | ||
181 | break; | ||
182 | } | ||
183 | case OFFLOAD_PORT_UP: { | ||
184 | event.event = IB_EVENT_PORT_ACTIVE; | ||
185 | break; | ||
186 | } | ||
178 | } | 187 | } |
179 | 188 | ||
189 | event.device = &rnicp->ibdev; | ||
190 | event.element.port_num = portnum; | ||
191 | ib_dispatch_event(&event); | ||
192 | |||
180 | return; | 193 | return; |
181 | } | 194 | } |
182 | 195 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 52d7bb0c2a12..66b41351910a 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref) | |||
286 | ep = container_of(container_of(kref, struct iwch_ep_common, kref), | 286 | ep = container_of(container_of(kref, struct iwch_ep_common, kref), |
287 | struct iwch_ep, com); | 287 | struct iwch_ep, com); |
288 | PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); | 288 | PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); |
289 | if (ep->com.flags & RELEASE_RESOURCES) { | 289 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { |
290 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); | 290 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); |
291 | dst_release(ep->dst); | 291 | dst_release(ep->dst); |
292 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 292 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); |
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref) | |||
297 | static void release_ep_resources(struct iwch_ep *ep) | 297 | static void release_ep_resources(struct iwch_ep *ep) |
298 | { | 298 | { |
299 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); | 299 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); |
300 | ep->com.flags |= RELEASE_RESOURCES; | 300 | set_bit(RELEASE_RESOURCES, &ep->com.flags); |
301 | put_ep(&ep->com); | 301 | put_ep(&ep->com); |
302 | } | 302 | } |
303 | 303 | ||
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep) | |||
786 | event.private_data_len = ep->plen; | 786 | event.private_data_len = ep->plen; |
787 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | 787 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); |
788 | event.provider_data = ep; | 788 | event.provider_data = ep; |
789 | if (state_read(&ep->parent_ep->com) != DEAD) | 789 | if (state_read(&ep->parent_ep->com) != DEAD) { |
790 | get_ep(&ep->com); | ||
790 | ep->parent_ep->com.cm_id->event_handler( | 791 | ep->parent_ep->com.cm_id->event_handler( |
791 | ep->parent_ep->com.cm_id, | 792 | ep->parent_ep->com.cm_id, |
792 | &event); | 793 | &event); |
794 | } | ||
793 | put_ep(&ep->parent_ep->com); | 795 | put_ep(&ep->parent_ep->com); |
794 | ep->parent_ep = NULL; | 796 | ep->parent_ep = NULL; |
795 | } | 797 | } |
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1156 | * We get 2 abort replies from the HW. The first one must | 1158 | * We get 2 abort replies from the HW. The first one must |
1157 | * be ignored except for scribbling that we need one more. | 1159 | * be ignored except for scribbling that we need one more. |
1158 | */ | 1160 | */ |
1159 | if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) { | 1161 | if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) { |
1160 | ep->com.flags |= ABORT_REQ_IN_PROGRESS; | ||
1161 | return CPL_RET_BUF_DONE; | 1162 | return CPL_RET_BUF_DONE; |
1162 | } | 1163 | } |
1163 | 1164 | ||
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1477 | /* | 1478 | /* |
1478 | * We're gonna mark this puppy DEAD, but keep | 1479 | * We're gonna mark this puppy DEAD, but keep |
1479 | * the reference on it until the ULP accepts or | 1480 | * the reference on it until the ULP accepts or |
1480 | * rejects the CR. | 1481 | * rejects the CR. Also wake up anyone waiting |
1482 | * in rdma connection migration (see iwch_accept_cr()). | ||
1481 | */ | 1483 | */ |
1482 | __state_set(&ep->com, CLOSING); | 1484 | __state_set(&ep->com, CLOSING); |
1483 | get_ep(&ep->com); | 1485 | ep->com.rpl_done = 1; |
1486 | ep->com.rpl_err = -ECONNRESET; | ||
1487 | PDBG("waking up ep %p\n", ep); | ||
1488 | wake_up(&ep->com.waitq); | ||
1484 | break; | 1489 | break; |
1485 | case MPA_REP_SENT: | 1490 | case MPA_REP_SENT: |
1486 | __state_set(&ep->com, CLOSING); | 1491 | __state_set(&ep->com, CLOSING); |
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1561 | * We get 2 peer aborts from the HW. The first one must | 1566 | * We get 2 peer aborts from the HW. The first one must |
1562 | * be ignored except for scribbling that we need one more. | 1567 | * be ignored except for scribbling that we need one more. |
1563 | */ | 1568 | */ |
1564 | if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) { | 1569 | if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) { |
1565 | ep->com.flags |= PEER_ABORT_IN_PROGRESS; | ||
1566 | return CPL_RET_BUF_DONE; | 1570 | return CPL_RET_BUF_DONE; |
1567 | } | 1571 | } |
1568 | 1572 | ||
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1589 | /* | 1593 | /* |
1590 | * We're gonna mark this puppy DEAD, but keep | 1594 | * We're gonna mark this puppy DEAD, but keep |
1591 | * the reference on it until the ULP accepts or | 1595 | * the reference on it until the ULP accepts or |
1592 | * rejects the CR. | 1596 | * rejects the CR. Also wake up anyone waiting |
1597 | * in rdma connection migration (see iwch_accept_cr()). | ||
1593 | */ | 1598 | */ |
1594 | get_ep(&ep->com); | 1599 | ep->com.rpl_done = 1; |
1600 | ep->com.rpl_err = -ECONNRESET; | ||
1601 | PDBG("waking up ep %p\n", ep); | ||
1602 | wake_up(&ep->com.waitq); | ||
1595 | break; | 1603 | break; |
1596 | case MORIBUND: | 1604 | case MORIBUND: |
1597 | case CLOSING: | 1605 | case CLOSING: |
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | |||
1797 | err = send_mpa_reject(ep, pdata, pdata_len); | 1805 | err = send_mpa_reject(ep, pdata, pdata_len); |
1798 | err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); | 1806 | err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); |
1799 | } | 1807 | } |
1808 | put_ep(&ep->com); | ||
1800 | return 0; | 1809 | return 0; |
1801 | } | 1810 | } |
1802 | 1811 | ||
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1810 | struct iwch_qp *qp = get_qhp(h, conn_param->qpn); | 1819 | struct iwch_qp *qp = get_qhp(h, conn_param->qpn); |
1811 | 1820 | ||
1812 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1821 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
1813 | if (state_read(&ep->com) == DEAD) | 1822 | if (state_read(&ep->com) == DEAD) { |
1814 | return -ECONNRESET; | 1823 | err = -ECONNRESET; |
1824 | goto err; | ||
1825 | } | ||
1815 | 1826 | ||
1816 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | 1827 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); |
1817 | BUG_ON(!qp); | 1828 | BUG_ON(!qp); |
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1819 | if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || | 1830 | if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || |
1820 | (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { | 1831 | (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { |
1821 | abort_connection(ep, NULL, GFP_KERNEL); | 1832 | abort_connection(ep, NULL, GFP_KERNEL); |
1822 | return -EINVAL; | 1833 | err = -EINVAL; |
1834 | goto err; | ||
1823 | } | 1835 | } |
1824 | 1836 | ||
1825 | cm_id->add_ref(cm_id); | 1837 | cm_id->add_ref(cm_id); |
1826 | ep->com.cm_id = cm_id; | 1838 | ep->com.cm_id = cm_id; |
1827 | ep->com.qp = qp; | 1839 | ep->com.qp = qp; |
1828 | 1840 | ||
1829 | ep->com.rpl_done = 0; | ||
1830 | ep->com.rpl_err = 0; | ||
1831 | ep->ird = conn_param->ird; | 1841 | ep->ird = conn_param->ird; |
1832 | ep->ord = conn_param->ord; | 1842 | ep->ord = conn_param->ord; |
1833 | 1843 | ||
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1836 | 1846 | ||
1837 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | 1847 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); |
1838 | 1848 | ||
1839 | get_ep(&ep->com); | ||
1840 | |||
1841 | /* bind QP to EP and move to RTS */ | 1849 | /* bind QP to EP and move to RTS */ |
1842 | attrs.mpa_attr = ep->mpa_attr; | 1850 | attrs.mpa_attr = ep->mpa_attr; |
1843 | attrs.max_ird = ep->ird; | 1851 | attrs.max_ird = ep->ird; |
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1855 | err = iwch_modify_qp(ep->com.qp->rhp, | 1863 | err = iwch_modify_qp(ep->com.qp->rhp, |
1856 | ep->com.qp, mask, &attrs, 1); | 1864 | ep->com.qp, mask, &attrs, 1); |
1857 | if (err) | 1865 | if (err) |
1858 | goto err; | 1866 | goto err1; |
1859 | 1867 | ||
1860 | /* if needed, wait for wr_ack */ | 1868 | /* if needed, wait for wr_ack */ |
1861 | if (iwch_rqes_posted(qp)) { | 1869 | if (iwch_rqes_posted(qp)) { |
1862 | wait_event(ep->com.waitq, ep->com.rpl_done); | 1870 | wait_event(ep->com.waitq, ep->com.rpl_done); |
1863 | err = ep->com.rpl_err; | 1871 | err = ep->com.rpl_err; |
1864 | if (err) | 1872 | if (err) |
1865 | goto err; | 1873 | goto err1; |
1866 | } | 1874 | } |
1867 | 1875 | ||
1868 | err = send_mpa_reply(ep, conn_param->private_data, | 1876 | err = send_mpa_reply(ep, conn_param->private_data, |
1869 | conn_param->private_data_len); | 1877 | conn_param->private_data_len); |
1870 | if (err) | 1878 | if (err) |
1871 | goto err; | 1879 | goto err1; |
1872 | 1880 | ||
1873 | 1881 | ||
1874 | state_set(&ep->com, FPDU_MODE); | 1882 | state_set(&ep->com, FPDU_MODE); |
1875 | established_upcall(ep); | 1883 | established_upcall(ep); |
1876 | put_ep(&ep->com); | 1884 | put_ep(&ep->com); |
1877 | return 0; | 1885 | return 0; |
1878 | err: | 1886 | err1: |
1879 | ep->com.cm_id = NULL; | 1887 | ep->com.cm_id = NULL; |
1880 | ep->com.qp = NULL; | 1888 | ep->com.qp = NULL; |
1881 | cm_id->rem_ref(cm_id); | 1889 | cm_id->rem_ref(cm_id); |
1890 | err: | ||
1882 | put_ep(&ep->com); | 1891 | put_ep(&ep->com); |
1883 | return err; | 1892 | return err; |
1884 | } | 1893 | } |
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) | |||
2097 | ep->com.state = CLOSING; | 2106 | ep->com.state = CLOSING; |
2098 | start_ep_timer(ep); | 2107 | start_ep_timer(ep); |
2099 | } | 2108 | } |
2109 | set_bit(CLOSE_SENT, &ep->com.flags); | ||
2100 | break; | 2110 | break; |
2101 | case CLOSING: | 2111 | case CLOSING: |
2102 | close = 1; | 2112 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { |
2103 | if (abrupt) { | 2113 | close = 1; |
2104 | stop_ep_timer(ep); | 2114 | if (abrupt) { |
2105 | ep->com.state = ABORTING; | 2115 | stop_ep_timer(ep); |
2106 | } else | 2116 | ep->com.state = ABORTING; |
2107 | ep->com.state = MORIBUND; | 2117 | } else |
2118 | ep->com.state = MORIBUND; | ||
2119 | } | ||
2108 | break; | 2120 | break; |
2109 | case MORIBUND: | 2121 | case MORIBUND: |
2110 | case ABORTING: | 2122 | case ABORTING: |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h index 43c0aea7eadc..b9efadfffb4f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.h +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h | |||
@@ -145,9 +145,10 @@ enum iwch_ep_state { | |||
145 | }; | 145 | }; |
146 | 146 | ||
147 | enum iwch_ep_flags { | 147 | enum iwch_ep_flags { |
148 | PEER_ABORT_IN_PROGRESS = (1 << 0), | 148 | PEER_ABORT_IN_PROGRESS = 0, |
149 | ABORT_REQ_IN_PROGRESS = (1 << 1), | 149 | ABORT_REQ_IN_PROGRESS = 1, |
150 | RELEASE_RESOURCES = (1 << 2), | 150 | RELEASE_RESOURCES = 2, |
151 | CLOSE_SENT = 3, | ||
151 | }; | 152 | }; |
152 | 153 | ||
153 | struct iwch_ep_common { | 154 | struct iwch_ep_common { |
@@ -162,7 +163,7 @@ struct iwch_ep_common { | |||
162 | wait_queue_head_t waitq; | 163 | wait_queue_head_t waitq; |
163 | int rpl_done; | 164 | int rpl_done; |
164 | int rpl_err; | 165 | int rpl_err; |
165 | u32 flags; | 166 | unsigned long flags; |
166 | }; | 167 | }; |
167 | 168 | ||
168 | struct iwch_listen_ep { | 169 | struct iwch_listen_ep { |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c index ec49a5cbdebb..e1ec65ebb016 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include "iwch.h" | 39 | #include "iwch.h" |
40 | #include "iwch_provider.h" | 40 | #include "iwch_provider.h" |
41 | 41 | ||
42 | static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) | 42 | static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) |
43 | { | 43 | { |
44 | u32 mmid; | 44 | u32 mmid; |
45 | 45 | ||
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) | |||
47 | mhp->attr.stag = stag; | 47 | mhp->attr.stag = stag; |
48 | mmid = stag >> 8; | 48 | mmid = stag >> 8; |
49 | mhp->ibmr.rkey = mhp->ibmr.lkey = stag; | 49 | mhp->ibmr.rkey = mhp->ibmr.lkey = stag; |
50 | insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); | ||
51 | PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); | 50 | PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); |
51 | return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); | ||
52 | } | 52 | } |
53 | 53 | ||
54 | int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, | 54 | int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, |
55 | struct iwch_mr *mhp, int shift) | 55 | struct iwch_mr *mhp, int shift) |
56 | { | 56 | { |
57 | u32 stag; | 57 | u32 stag; |
58 | int ret; | ||
58 | 59 | ||
59 | if (cxio_register_phys_mem(&rhp->rdev, | 60 | if (cxio_register_phys_mem(&rhp->rdev, |
60 | &stag, mhp->attr.pdid, | 61 | &stag, mhp->attr.pdid, |
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, | |||
66 | mhp->attr.pbl_size, mhp->attr.pbl_addr)) | 67 | mhp->attr.pbl_size, mhp->attr.pbl_addr)) |
67 | return -ENOMEM; | 68 | return -ENOMEM; |
68 | 69 | ||
69 | iwch_finish_mem_reg(mhp, stag); | 70 | ret = iwch_finish_mem_reg(mhp, stag); |
70 | 71 | if (ret) | |
71 | return 0; | 72 | cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, |
73 | mhp->attr.pbl_addr); | ||
74 | return ret; | ||
72 | } | 75 | } |
73 | 76 | ||
74 | int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, | 77 | int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, |
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, | |||
77 | int npages) | 80 | int npages) |
78 | { | 81 | { |
79 | u32 stag; | 82 | u32 stag; |
83 | int ret; | ||
80 | 84 | ||
81 | /* We could support this... */ | 85 | /* We could support this... */ |
82 | if (npages > mhp->attr.pbl_size) | 86 | if (npages > mhp->attr.pbl_size) |
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, | |||
93 | mhp->attr.pbl_size, mhp->attr.pbl_addr)) | 97 | mhp->attr.pbl_size, mhp->attr.pbl_addr)) |
94 | return -ENOMEM; | 98 | return -ENOMEM; |
95 | 99 | ||
96 | iwch_finish_mem_reg(mhp, stag); | 100 | ret = iwch_finish_mem_reg(mhp, stag); |
101 | if (ret) | ||
102 | cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, | ||
103 | mhp->attr.pbl_addr); | ||
97 | 104 | ||
98 | return 0; | 105 | return ret; |
99 | } | 106 | } |
100 | 107 | ||
101 | int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) | 108 | int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index e2a63214008a..6895523779d0 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
195 | spin_lock_init(&chp->lock); | 195 | spin_lock_init(&chp->lock); |
196 | atomic_set(&chp->refcnt, 1); | 196 | atomic_set(&chp->refcnt, 1); |
197 | init_waitqueue_head(&chp->wait); | 197 | init_waitqueue_head(&chp->wait); |
198 | insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); | 198 | if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { |
199 | cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); | ||
200 | kfree(chp); | ||
201 | return ERR_PTR(-ENOMEM); | ||
202 | } | ||
199 | 203 | ||
200 | if (ucontext) { | 204 | if (ucontext) { |
201 | struct iwch_mm_entry *mm; | 205 | struct iwch_mm_entry *mm; |
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd) | |||
750 | mhp->attr.stag = stag; | 754 | mhp->attr.stag = stag; |
751 | mmid = (stag) >> 8; | 755 | mmid = (stag) >> 8; |
752 | mhp->ibmw.rkey = stag; | 756 | mhp->ibmw.rkey = stag; |
753 | insert_handle(rhp, &rhp->mmidr, mhp, mmid); | 757 | if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { |
758 | cxio_deallocate_window(&rhp->rdev, mhp->attr.stag); | ||
759 | kfree(mhp); | ||
760 | return ERR_PTR(-ENOMEM); | ||
761 | } | ||
754 | PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); | 762 | PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); |
755 | return &(mhp->ibmw); | 763 | return &(mhp->ibmw); |
756 | } | 764 | } |
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) | |||
778 | struct iwch_mr *mhp; | 786 | struct iwch_mr *mhp; |
779 | u32 mmid; | 787 | u32 mmid; |
780 | u32 stag = 0; | 788 | u32 stag = 0; |
781 | int ret; | 789 | int ret = 0; |
782 | 790 | ||
783 | php = to_iwch_pd(pd); | 791 | php = to_iwch_pd(pd); |
784 | rhp = php->rhp; | 792 | rhp = php->rhp; |
785 | mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); | 793 | mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); |
786 | if (!mhp) | 794 | if (!mhp) |
787 | return ERR_PTR(-ENOMEM); | 795 | goto err; |
788 | 796 | ||
789 | mhp->rhp = rhp; | 797 | mhp->rhp = rhp; |
790 | ret = iwch_alloc_pbl(mhp, pbl_depth); | 798 | ret = iwch_alloc_pbl(mhp, pbl_depth); |
791 | if (ret) { | 799 | if (ret) |
792 | kfree(mhp); | 800 | goto err1; |
793 | return ERR_PTR(ret); | ||
794 | } | ||
795 | mhp->attr.pbl_size = pbl_depth; | 801 | mhp->attr.pbl_size = pbl_depth; |
796 | ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid, | 802 | ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid, |
797 | mhp->attr.pbl_size, mhp->attr.pbl_addr); | 803 | mhp->attr.pbl_size, mhp->attr.pbl_addr); |
798 | if (ret) { | 804 | if (ret) |
799 | iwch_free_pbl(mhp); | 805 | goto err2; |
800 | kfree(mhp); | ||
801 | return ERR_PTR(ret); | ||
802 | } | ||
803 | mhp->attr.pdid = php->pdid; | 806 | mhp->attr.pdid = php->pdid; |
804 | mhp->attr.type = TPT_NON_SHARED_MR; | 807 | mhp->attr.type = TPT_NON_SHARED_MR; |
805 | mhp->attr.stag = stag; | 808 | mhp->attr.stag = stag; |
806 | mhp->attr.state = 1; | 809 | mhp->attr.state = 1; |
807 | mmid = (stag) >> 8; | 810 | mmid = (stag) >> 8; |
808 | mhp->ibmr.rkey = mhp->ibmr.lkey = stag; | 811 | mhp->ibmr.rkey = mhp->ibmr.lkey = stag; |
809 | insert_handle(rhp, &rhp->mmidr, mhp, mmid); | 812 | if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) |
813 | goto err3; | ||
814 | |||
810 | PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); | 815 | PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); |
811 | return &(mhp->ibmr); | 816 | return &(mhp->ibmr); |
817 | err3: | ||
818 | cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, | ||
819 | mhp->attr.pbl_addr); | ||
820 | err2: | ||
821 | iwch_free_pbl(mhp); | ||
822 | err1: | ||
823 | kfree(mhp); | ||
824 | err: | ||
825 | return ERR_PTR(ret); | ||
812 | } | 826 | } |
813 | 827 | ||
814 | static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl( | 828 | static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl( |
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, | |||
961 | spin_lock_init(&qhp->lock); | 975 | spin_lock_init(&qhp->lock); |
962 | init_waitqueue_head(&qhp->wait); | 976 | init_waitqueue_head(&qhp->wait); |
963 | atomic_set(&qhp->refcnt, 1); | 977 | atomic_set(&qhp->refcnt, 1); |
964 | insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid); | 978 | |
979 | if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { | ||
980 | cxio_destroy_qp(&rhp->rdev, &qhp->wq, | ||
981 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | ||
982 | kfree(qhp); | ||
983 | return ERR_PTR(-ENOMEM); | ||
984 | } | ||
965 | 985 | ||
966 | if (udata) { | 986 | if (udata) { |
967 | 987 | ||
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev) | |||
1418 | bail2: | 1438 | bail2: |
1419 | ib_unregister_device(&dev->ibdev); | 1439 | ib_unregister_device(&dev->ibdev); |
1420 | bail1: | 1440 | bail1: |
1441 | kfree(dev->ibdev.iwcm); | ||
1421 | return ret; | 1442 | return ret; |
1422 | } | 1443 | } |
1423 | 1444 | ||
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev) | |||
1430 | device_remove_file(&dev->ibdev.dev, | 1451 | device_remove_file(&dev->ibdev.dev, |
1431 | iwch_class_attributes[i]); | 1452 | iwch_class_attributes[i]); |
1432 | ib_unregister_device(&dev->ibdev); | 1453 | ib_unregister_device(&dev->ibdev); |
1454 | kfree(dev->ibdev.iwcm); | ||
1433 | return; | 1455 | return; |
1434 | } | 1456 | } |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 27bbdc8e773a..6e8653471941 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp, | |||
889 | init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); | 889 | init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); |
890 | init_attr.rqe_count = iwch_rqes_posted(qhp); | 890 | init_attr.rqe_count = iwch_rqes_posted(qhp); |
891 | init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0; | 891 | init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0; |
892 | init_attr.chan = qhp->ep->l2t->smt_idx; | ||
892 | if (peer2peer) { | 893 | if (peer2peer) { |
893 | init_attr.rtr_type = RTR_READ; | 894 | init_attr.rtr_type = RTR_READ; |
894 | if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator) | 895 | if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator) |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index fab18a2c74a8..5b635aa5947e 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -52,7 +52,7 @@ | |||
52 | #include "ehca_tools.h" | 52 | #include "ehca_tools.h" |
53 | #include "hcp_if.h" | 53 | #include "hcp_if.h" |
54 | 54 | ||
55 | #define HCAD_VERSION "0028" | 55 | #define HCAD_VERSION "0029" |
56 | 56 | ||
57 | MODULE_LICENSE("Dual BSD/GPL"); | 57 | MODULE_LICENSE("Dual BSD/GPL"); |
58 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | 58 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); |
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0; | |||
64 | static int ehca_poll_all_eqs = 1; | 64 | static int ehca_poll_all_eqs = 1; |
65 | 65 | ||
66 | int ehca_debug_level = 0; | 66 | int ehca_debug_level = 0; |
67 | int ehca_nr_ports = 2; | 67 | int ehca_nr_ports = -1; |
68 | int ehca_use_hp_mr = 0; | 68 | int ehca_use_hp_mr = 0; |
69 | int ehca_port_act_time = 30; | 69 | int ehca_port_act_time = 30; |
70 | int ehca_static_rate = -1; | 70 | int ehca_static_rate = -1; |
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level, | |||
95 | "Hardware level (0: autosensing (default), " | 95 | "Hardware level (0: autosensing (default), " |
96 | "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); | 96 | "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); |
97 | MODULE_PARM_DESC(nr_ports, | 97 | MODULE_PARM_DESC(nr_ports, |
98 | "number of connected ports (-1: autodetect, 1: port one only, " | 98 | "number of connected ports (-1: autodetect (default), " |
99 | "2: two ports (default)"); | 99 | "1: port one only, 2: two ports)"); |
100 | MODULE_PARM_DESC(use_hp_mr, | 100 | MODULE_PARM_DESC(use_hp_mr, |
101 | "Use high performance MRs (default: no)"); | 101 | "Use high performance MRs (default: no)"); |
102 | MODULE_PARM_DESC(port_act_time, | 102 | MODULE_PARM_DESC(port_act_time, |
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 5a3d96f84c79..8fd88cd828fd 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -786,7 +786,11 @@ repoll: | |||
786 | wc->slid = cqe->rlid; | 786 | wc->slid = cqe->rlid; |
787 | wc->dlid_path_bits = cqe->dlid; | 787 | wc->dlid_path_bits = cqe->dlid; |
788 | wc->src_qp = cqe->remote_qp_number; | 788 | wc->src_qp = cqe->remote_qp_number; |
789 | wc->wc_flags = cqe->w_completion_flags; | 789 | /* |
790 | * HW has "Immed data present" and "GRH present" in bits 6 and 5. | ||
791 | * SW defines those in bits 1 and 0, so we can just shift and mask. | ||
792 | */ | ||
793 | wc->wc_flags = (cqe->w_completion_flags >> 5) & 3; | ||
790 | wc->ex.imm_data = cpu_to_be32(cqe->immediate_data); | 794 | wc->ex.imm_data = cpu_to_be32(cqe->immediate_data); |
791 | wc->sl = cqe->service_level; | 795 | wc->sl = cqe->service_level; |
792 | 796 | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index c568b28f4e20..8c1213f8916a 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c | |||
@@ -125,14 +125,30 @@ struct ib_perf { | |||
125 | u8 data[192]; | 125 | u8 data[192]; |
126 | } __attribute__ ((packed)); | 126 | } __attribute__ ((packed)); |
127 | 127 | ||
128 | /* TC/SL/FL packed into 32 bits, as in ClassPortInfo */ | ||
129 | struct tcslfl { | ||
130 | u32 tc:8; | ||
131 | u32 sl:4; | ||
132 | u32 fl:20; | ||
133 | } __attribute__ ((packed)); | ||
134 | |||
135 | /* IP Version/TC/FL packed into 32 bits, as in GRH */ | ||
136 | struct vertcfl { | ||
137 | u32 ver:4; | ||
138 | u32 tc:8; | ||
139 | u32 fl:20; | ||
140 | } __attribute__ ((packed)); | ||
128 | 141 | ||
129 | static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, | 142 | static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, |
143 | struct ib_wc *in_wc, struct ib_grh *in_grh, | ||
130 | struct ib_mad *in_mad, struct ib_mad *out_mad) | 144 | struct ib_mad *in_mad, struct ib_mad *out_mad) |
131 | { | 145 | { |
132 | struct ib_perf *in_perf = (struct ib_perf *)in_mad; | 146 | struct ib_perf *in_perf = (struct ib_perf *)in_mad; |
133 | struct ib_perf *out_perf = (struct ib_perf *)out_mad; | 147 | struct ib_perf *out_perf = (struct ib_perf *)out_mad; |
134 | struct ib_class_port_info *poi = | 148 | struct ib_class_port_info *poi = |
135 | (struct ib_class_port_info *)out_perf->data; | 149 | (struct ib_class_port_info *)out_perf->data; |
150 | struct tcslfl *tcslfl = | ||
151 | (struct tcslfl *)&poi->redirect_tcslfl; | ||
136 | struct ehca_shca *shca = | 152 | struct ehca_shca *shca = |
137 | container_of(ibdev, struct ehca_shca, ib_device); | 153 | container_of(ibdev, struct ehca_shca, ib_device); |
138 | struct ehca_sport *sport = &shca->sport[port_num - 1]; | 154 | struct ehca_sport *sport = &shca->sport[port_num - 1]; |
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, | |||
158 | poi->base_version = 1; | 174 | poi->base_version = 1; |
159 | poi->class_version = 1; | 175 | poi->class_version = 1; |
160 | poi->resp_time_value = 18; | 176 | poi->resp_time_value = 18; |
161 | poi->redirect_lid = sport->saved_attr.lid; | 177 | |
162 | poi->redirect_qp = sport->pma_qp_nr; | 178 | /* copy local routing information from WC where applicable */ |
179 | tcslfl->sl = in_wc->sl; | ||
180 | poi->redirect_lid = | ||
181 | sport->saved_attr.lid | in_wc->dlid_path_bits; | ||
182 | poi->redirect_qp = sport->pma_qp_nr; | ||
163 | poi->redirect_qkey = IB_QP1_QKEY; | 183 | poi->redirect_qkey = IB_QP1_QKEY; |
164 | poi->redirect_pkey = IB_DEFAULT_PKEY_FULL; | 184 | |
185 | ehca_query_pkey(ibdev, port_num, in_wc->pkey_index, | ||
186 | &poi->redirect_pkey); | ||
187 | |||
188 | /* if request was globally routed, copy route info */ | ||
189 | if (in_grh) { | ||
190 | struct vertcfl *vertcfl = | ||
191 | (struct vertcfl *)&in_grh->version_tclass_flow; | ||
192 | memcpy(poi->redirect_gid, in_grh->dgid.raw, | ||
193 | sizeof(poi->redirect_gid)); | ||
194 | tcslfl->tc = vertcfl->tc; | ||
195 | tcslfl->fl = vertcfl->fl; | ||
196 | } else | ||
197 | /* else only fill in default GID */ | ||
198 | ehca_query_gid(ibdev, port_num, 0, | ||
199 | (union ib_gid *)&poi->redirect_gid); | ||
165 | 200 | ||
166 | ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", | 201 | ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", |
167 | sport->saved_attr.lid, sport->pma_qp_nr); | 202 | sport->saved_attr.lid, sport->pma_qp_nr); |
@@ -183,8 +218,7 @@ perf_reply: | |||
183 | 218 | ||
184 | int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | 219 | int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
185 | struct ib_wc *in_wc, struct ib_grh *in_grh, | 220 | struct ib_wc *in_wc, struct ib_grh *in_grh, |
186 | struct ib_mad *in_mad, | 221 | struct ib_mad *in_mad, struct ib_mad *out_mad) |
187 | struct ib_mad *out_mad) | ||
188 | { | 222 | { |
189 | int ret; | 223 | int ret; |
190 | 224 | ||
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |||
196 | return IB_MAD_RESULT_SUCCESS; | 230 | return IB_MAD_RESULT_SUCCESS; |
197 | 231 | ||
198 | ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); | 232 | ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); |
199 | ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad); | 233 | ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh, |
234 | in_mad, out_mad); | ||
200 | 235 | ||
201 | return ret; | 236 | return ret; |
202 | } | 237 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 23173982b32c..38a287006612 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c | |||
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port, | |||
1616 | pd->port_cnt = 1; | 1616 | pd->port_cnt = 1; |
1617 | port_fp(fp) = pd; | 1617 | port_fp(fp) = pd; |
1618 | pd->port_pid = get_pid(task_pid(current)); | 1618 | pd->port_pid = get_pid(task_pid(current)); |
1619 | strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); | 1619 | strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); |
1620 | ipath_stats.sps_ports++; | 1620 | ipath_stats.sps_ports++; |
1621 | ret = 0; | 1621 | ret = 0; |
1622 | } else | 1622 | } else |
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 16a702d46018..ceb98ee78666 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c | |||
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp, | |||
60 | if (smp->attr_mod) | 60 | if (smp->attr_mod) |
61 | smp->status |= IB_SMP_INVALID_FIELD; | 61 | smp->status |= IB_SMP_INVALID_FIELD; |
62 | 62 | ||
63 | strncpy(smp->data, ibdev->node_desc, sizeof(smp->data)); | 63 | memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); |
64 | 64 | ||
65 | return reply(smp); | 65 | return reply(smp); |
66 | } | 66 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ae3d7590346e..3cb3f47a10b8 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev, | |||
342 | struct mlx4_ib_alloc_ucontext_resp resp; | 342 | struct mlx4_ib_alloc_ucontext_resp resp; |
343 | int err; | 343 | int err; |
344 | 344 | ||
345 | if (!dev->ib_active) | ||
346 | return ERR_PTR(-EAGAIN); | ||
347 | |||
345 | resp.qp_tab_size = dev->dev->caps.num_qps; | 348 | resp.qp_tab_size = dev->dev->caps.num_qps; |
346 | resp.bf_reg_size = dev->dev->caps.bf_reg_size; | 349 | resp.bf_reg_size = dev->dev->caps.bf_reg_size; |
347 | resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; | 350 | resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; |
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = { | |||
540 | 543 | ||
541 | static void *mlx4_ib_add(struct mlx4_dev *dev) | 544 | static void *mlx4_ib_add(struct mlx4_dev *dev) |
542 | { | 545 | { |
543 | static int mlx4_ib_version_printed; | ||
544 | struct mlx4_ib_dev *ibdev; | 546 | struct mlx4_ib_dev *ibdev; |
545 | int num_ports = 0; | 547 | int num_ports = 0; |
546 | int i; | 548 | int i; |
547 | 549 | ||
548 | if (!mlx4_ib_version_printed) { | 550 | printk_once(KERN_INFO "%s", mlx4_ib_version); |
549 | printk(KERN_INFO "%s", mlx4_ib_version); | ||
550 | ++mlx4_ib_version_printed; | ||
551 | } | ||
552 | 551 | ||
553 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | 552 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) |
554 | num_ports++; | 553 | num_ports++; |
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
673 | goto err_reg; | 672 | goto err_reg; |
674 | } | 673 | } |
675 | 674 | ||
675 | ibdev->ib_active = true; | ||
676 | |||
676 | return ibdev; | 677 | return ibdev; |
677 | 678 | ||
678 | err_reg: | 679 | err_reg: |
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | |||
729 | break; | 730 | break; |
730 | 731 | ||
731 | case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: | 732 | case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: |
733 | ibdev->ib_active = false; | ||
732 | ibev.event = IB_EVENT_DEVICE_FATAL; | 734 | ibev.event = IB_EVENT_DEVICE_FATAL; |
733 | break; | 735 | break; |
734 | 736 | ||
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 8a7dd6795fa0..3486d7675e56 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -175,6 +175,7 @@ struct mlx4_ib_dev { | |||
175 | spinlock_t sm_lock; | 175 | spinlock_t sm_lock; |
176 | 176 | ||
177 | struct mutex cap_mask_mutex; | 177 | struct mutex cap_mask_mutex; |
178 | bool ib_active; | ||
178 | }; | 179 | }; |
179 | 180 | ||
180 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) | 181 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index c4a02648c8af..219b10397b4d 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) | |||
615 | } | 615 | } |
616 | 616 | ||
617 | static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) | 617 | static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) |
618 | __acquires(&send_cq->lock) __acquires(&recv_cq->lock) | ||
618 | { | 619 | { |
619 | if (send_cq == recv_cq) | 620 | if (send_cq == recv_cq) { |
620 | spin_lock_irq(&send_cq->lock); | 621 | spin_lock_irq(&send_cq->lock); |
621 | else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { | 622 | __acquire(&recv_cq->lock); |
623 | } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { | ||
622 | spin_lock_irq(&send_cq->lock); | 624 | spin_lock_irq(&send_cq->lock); |
623 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); | 625 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); |
624 | } else { | 626 | } else { |
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv | |||
628 | } | 630 | } |
629 | 631 | ||
630 | static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) | 632 | static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) |
633 | __releases(&send_cq->lock) __releases(&recv_cq->lock) | ||
631 | { | 634 | { |
632 | if (send_cq == recv_cq) | 635 | if (send_cq == recv_cq) { |
636 | __release(&recv_cq->lock); | ||
633 | spin_unlock_irq(&send_cq->lock); | 637 | spin_unlock_irq(&send_cq->lock); |
634 | else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { | 638 | } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { |
635 | spin_unlock(&recv_cq->lock); | 639 | spin_unlock(&recv_cq->lock); |
636 | spin_unlock_irq(&send_cq->lock); | 640 | spin_unlock_irq(&send_cq->lock); |
637 | } else { | 641 | } else { |
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index 65ad359fdf16..056b2a4c6970 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c | |||
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev) | |||
88 | event.device = &dev->ib_dev; | 88 | event.device = &dev->ib_dev; |
89 | event.event = IB_EVENT_DEVICE_FATAL; | 89 | event.event = IB_EVENT_DEVICE_FATAL; |
90 | event.element.port_num = 0; | 90 | event.element.port_num = 0; |
91 | dev->active = false; | ||
91 | 92 | ||
92 | ib_dispatch_event(&event); | 93 | ib_dispatch_event(&event); |
93 | 94 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h index 75671f75cac4..155bc66395be 100644 --- a/drivers/infiniband/hw/mthca/mthca_config_reg.h +++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h | |||
@@ -34,8 +34,6 @@ | |||
34 | #ifndef MTHCA_CONFIG_REG_H | 34 | #ifndef MTHCA_CONFIG_REG_H |
35 | #define MTHCA_CONFIG_REG_H | 35 | #define MTHCA_CONFIG_REG_H |
36 | 36 | ||
37 | #include <asm/page.h> | ||
38 | |||
39 | #define MTHCA_HCR_BASE 0x80680 | 37 | #define MTHCA_HCR_BASE 0x80680 |
40 | #define MTHCA_HCR_SIZE 0x0001c | 38 | #define MTHCA_HCR_SIZE 0x0001c |
41 | #define MTHCA_ECR_BASE 0x80700 | 39 | #define MTHCA_ECR_BASE 0x80700 |
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 9ef611f6dd36..7e6a6d64ad4e 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h | |||
@@ -357,6 +357,7 @@ struct mthca_dev { | |||
357 | struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; | 357 | struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; |
358 | spinlock_t sm_lock; | 358 | spinlock_t sm_lock; |
359 | u8 rate[MTHCA_MAX_PORTS]; | 359 | u8 rate[MTHCA_MAX_PORTS]; |
360 | bool active; | ||
360 | }; | 361 | }; |
361 | 362 | ||
362 | #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG | 363 | #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG |
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 90e4e450a120..8c31fa36e95e 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c | |||
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev) | |||
829 | 829 | ||
830 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { | 830 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { |
831 | static const char *eq_name[] = { | 831 | static const char *eq_name[] = { |
832 | [MTHCA_EQ_COMP] = DRV_NAME " (comp)", | 832 | [MTHCA_EQ_COMP] = DRV_NAME "-comp", |
833 | [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", | 833 | [MTHCA_EQ_ASYNC] = DRV_NAME "-async", |
834 | [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" | 834 | [MTHCA_EQ_CMD] = DRV_NAME "-cmd" |
835 | }; | 835 | }; |
836 | 836 | ||
837 | for (i = 0; i < MTHCA_NUM_EQ; ++i) { | 837 | for (i = 0; i < MTHCA_NUM_EQ; ++i) { |
838 | snprintf(dev->eq_table.eq[i].irq_name, | ||
839 | IB_DEVICE_NAME_MAX, | ||
840 | "%s@pci:%s", eq_name[i], | ||
841 | pci_name(dev->pdev)); | ||
838 | err = request_irq(dev->eq_table.eq[i].msi_x_vector, | 842 | err = request_irq(dev->eq_table.eq[i].msi_x_vector, |
839 | mthca_is_memfree(dev) ? | 843 | mthca_is_memfree(dev) ? |
840 | mthca_arbel_msi_x_interrupt : | 844 | mthca_arbel_msi_x_interrupt : |
841 | mthca_tavor_msi_x_interrupt, | 845 | mthca_tavor_msi_x_interrupt, |
842 | 0, eq_name[i], dev->eq_table.eq + i); | 846 | 0, dev->eq_table.eq[i].irq_name, |
847 | dev->eq_table.eq + i); | ||
843 | if (err) | 848 | if (err) |
844 | goto err_out_cmd; | 849 | goto err_out_cmd; |
845 | dev->eq_table.eq[i].have_irq = 1; | 850 | dev->eq_table.eq[i].have_irq = 1; |
846 | } | 851 | } |
847 | } else { | 852 | } else { |
853 | snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX, | ||
854 | DRV_NAME "@pci:%s", pci_name(dev->pdev)); | ||
848 | err = request_irq(dev->pdev->irq, | 855 | err = request_irq(dev->pdev->irq, |
849 | mthca_is_memfree(dev) ? | 856 | mthca_is_memfree(dev) ? |
850 | mthca_arbel_interrupt : | 857 | mthca_arbel_interrupt : |
851 | mthca_tavor_interrupt, | 858 | mthca_tavor_interrupt, |
852 | IRQF_SHARED, DRV_NAME, dev); | 859 | IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev); |
853 | if (err) | 860 | if (err) |
854 | goto err_out_cmd; | 861 | goto err_out_cmd; |
855 | dev->eq_table.have_irq = 1; | 862 | dev->eq_table.have_irq = 1; |
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 13da9f1d24c0..b01b28987874 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) | |||
1116 | pci_set_drvdata(pdev, mdev); | 1116 | pci_set_drvdata(pdev, mdev); |
1117 | mdev->hca_type = hca_type; | 1117 | mdev->hca_type = hca_type; |
1118 | 1118 | ||
1119 | mdev->active = true; | ||
1120 | |||
1119 | return 0; | 1121 | return 0; |
1120 | 1122 | ||
1121 | err_unregister: | 1123 | err_unregister: |
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev) | |||
1215 | static int __devinit mthca_init_one(struct pci_dev *pdev, | 1217 | static int __devinit mthca_init_one(struct pci_dev *pdev, |
1216 | const struct pci_device_id *id) | 1218 | const struct pci_device_id *id) |
1217 | { | 1219 | { |
1218 | static int mthca_version_printed = 0; | ||
1219 | int ret; | 1220 | int ret; |
1220 | 1221 | ||
1221 | mutex_lock(&mthca_device_mutex); | 1222 | mutex_lock(&mthca_device_mutex); |
1222 | 1223 | ||
1223 | if (!mthca_version_printed) { | 1224 | printk_once(KERN_INFO "%s", mthca_version); |
1224 | printk(KERN_INFO "%s", mthca_version); | ||
1225 | ++mthca_version_printed; | ||
1226 | } | ||
1227 | 1225 | ||
1228 | if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { | 1226 | if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { |
1229 | printk(KERN_ERR PFX "%s has invalid driver data %lx\n", | 1227 | printk(KERN_ERR PFX "%s has invalid driver data %lx\n", |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 87ad889e367b..bcf7a4014820 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, | |||
334 | struct mthca_ucontext *context; | 334 | struct mthca_ucontext *context; |
335 | int err; | 335 | int err; |
336 | 336 | ||
337 | if (!(to_mdev(ibdev)->active)) | ||
338 | return ERR_PTR(-EAGAIN); | ||
339 | |||
337 | memset(&uresp, 0, sizeof uresp); | 340 | memset(&uresp, 0, sizeof uresp); |
338 | 341 | ||
339 | uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; | 342 | uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index c621f8794b88..90f4c4d2e983 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h | |||
@@ -113,6 +113,7 @@ struct mthca_eq { | |||
113 | int nent; | 113 | int nent; |
114 | struct mthca_buf_list *page_list; | 114 | struct mthca_buf_list *page_list; |
115 | struct mthca_mr mr; | 115 | struct mthca_mr mr; |
116 | char irq_name[IB_DEVICE_NAME_MAX]; | ||
116 | }; | 117 | }; |
117 | 118 | ||
118 | struct mthca_av; | 119 | struct mthca_av; |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index f5081bfde6db..c10576fa60c1 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev, | |||
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) | 1321 | static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) |
1322 | __acquires(&send_cq->lock) __acquires(&recv_cq->lock) | ||
1322 | { | 1323 | { |
1323 | if (send_cq == recv_cq) | 1324 | if (send_cq == recv_cq) { |
1324 | spin_lock_irq(&send_cq->lock); | 1325 | spin_lock_irq(&send_cq->lock); |
1325 | else if (send_cq->cqn < recv_cq->cqn) { | 1326 | __acquire(&recv_cq->lock); |
1327 | } else if (send_cq->cqn < recv_cq->cqn) { | ||
1326 | spin_lock_irq(&send_cq->lock); | 1328 | spin_lock_irq(&send_cq->lock); |
1327 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); | 1329 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); |
1328 | } else { | 1330 | } else { |
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) | |||
1332 | } | 1334 | } |
1333 | 1335 | ||
1334 | static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) | 1336 | static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) |
1337 | __releases(&send_cq->lock) __releases(&recv_cq->lock) | ||
1335 | { | 1338 | { |
1336 | if (send_cq == recv_cq) | 1339 | if (send_cq == recv_cq) { |
1340 | __release(&recv_cq->lock); | ||
1337 | spin_unlock_irq(&send_cq->lock); | 1341 | spin_unlock_irq(&send_cq->lock); |
1338 | else if (send_cq->cqn < recv_cq->cqn) { | 1342 | } else if (send_cq->cqn < recv_cq->cqn) { |
1339 | spin_unlock(&recv_cq->lock); | 1343 | spin_unlock(&recv_cq->lock); |
1340 | spin_unlock_irq(&send_cq->lock); | 1344 | spin_unlock_irq(&send_cq->lock); |
1341 | } else { | 1345 | } else { |
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c index acb6817f6060..2a13a163d337 100644 --- a/drivers/infiniband/hw/mthca/mthca_reset.c +++ b/drivers/infiniband/hw/mthca/mthca_reset.c | |||
@@ -30,7 +30,6 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/errno.h> | 33 | #include <linux/errno.h> |
35 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
36 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index bf1720f7f35f..bcc6abc4faff 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *); | |||
523 | void nes_cm_disconn_worker(void *); | 523 | void nes_cm_disconn_worker(void *); |
524 | 524 | ||
525 | /* nes_verbs.c */ | 525 | /* nes_verbs.c */ |
526 | int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32); | 526 | int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32); |
527 | int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); | 527 | int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); |
528 | struct nes_ib_device *nes_init_ofa_device(struct net_device *); | 528 | struct nes_ib_device *nes_init_ofa_device(struct net_device *); |
529 | void nes_destroy_ofa_device(struct nes_ib_device *); | 529 | void nes_destroy_ofa_device(struct nes_ib_device *); |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 114b802771ad..73473db19863 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2450 | */ | 2450 | */ |
2451 | int nes_cm_disconn(struct nes_qp *nesqp) | 2451 | int nes_cm_disconn(struct nes_qp *nesqp) |
2452 | { | 2452 | { |
2453 | unsigned long flags; | 2453 | struct disconn_work *work; |
2454 | |||
2455 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2456 | if (nesqp->disconn_pending == 0) { | ||
2457 | nesqp->disconn_pending++; | ||
2458 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2459 | /* init our disconnect work element, to */ | ||
2460 | INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker); | ||
2461 | 2454 | ||
2462 | queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); | 2455 | work = kzalloc(sizeof *work, GFP_ATOMIC); |
2463 | } else | 2456 | if (!work) |
2464 | spin_unlock_irqrestore(&nesqp->lock, flags); | 2457 | return -ENOMEM; /* Timer will clean up */ |
2465 | 2458 | ||
2459 | nes_add_ref(&nesqp->ibqp); | ||
2460 | work->nesqp = nesqp; | ||
2461 | INIT_WORK(&work->work, nes_disconnect_worker); | ||
2462 | queue_work(g_cm_core->disconn_wq, &work->work); | ||
2466 | return 0; | 2463 | return 0; |
2467 | } | 2464 | } |
2468 | 2465 | ||
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp) | |||
2472 | */ | 2469 | */ |
2473 | static void nes_disconnect_worker(struct work_struct *work) | 2470 | static void nes_disconnect_worker(struct work_struct *work) |
2474 | { | 2471 | { |
2475 | struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); | 2472 | struct disconn_work *dwork = container_of(work, struct disconn_work, work); |
2473 | struct nes_qp *nesqp = dwork->nesqp; | ||
2476 | 2474 | ||
2475 | kfree(dwork); | ||
2477 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", | 2476 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", |
2478 | nesqp->last_aeq, nesqp->hwqp.qp_id); | 2477 | nesqp->last_aeq, nesqp->hwqp.qp_id); |
2479 | nes_cm_disconn_true(nesqp); | 2478 | nes_cm_disconn_true(nesqp); |
2479 | nes_rem_ref(&nesqp->ibqp); | ||
2480 | } | 2480 | } |
2481 | 2481 | ||
2482 | 2482 | ||
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2493 | u16 last_ae; | 2493 | u16 last_ae; |
2494 | u8 original_hw_tcp_state; | 2494 | u8 original_hw_tcp_state; |
2495 | u8 original_ibqp_state; | 2495 | u8 original_ibqp_state; |
2496 | u8 issued_disconnect_reset = 0; | 2496 | enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK; |
2497 | int issue_disconn = 0; | ||
2498 | int issue_close = 0; | ||
2499 | int issue_flush = 0; | ||
2500 | u32 flush_q = NES_CQP_FLUSH_RQ; | ||
2501 | struct ib_event ibevent; | ||
2497 | 2502 | ||
2498 | if (!nesqp) { | 2503 | if (!nesqp) { |
2499 | nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); | 2504 | nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); |
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2517 | original_ibqp_state = nesqp->ibqp_state; | 2522 | original_ibqp_state = nesqp->ibqp_state; |
2518 | last_ae = nesqp->last_aeq; | 2523 | last_ae = nesqp->last_aeq; |
2519 | 2524 | ||
2525 | if (nesqp->term_flags) { | ||
2526 | issue_disconn = 1; | ||
2527 | issue_close = 1; | ||
2528 | nesqp->cm_id = NULL; | ||
2529 | if (nesqp->flush_issued == 0) { | ||
2530 | nesqp->flush_issued = 1; | ||
2531 | issue_flush = 1; | ||
2532 | } | ||
2533 | } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || | ||
2534 | ((original_ibqp_state == IB_QPS_RTS) && | ||
2535 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2536 | issue_disconn = 1; | ||
2537 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) | ||
2538 | disconn_status = IW_CM_EVENT_STATUS_RESET; | ||
2539 | } | ||
2540 | |||
2541 | if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | ||
2542 | (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) || | ||
2543 | (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) || | ||
2544 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2545 | issue_close = 1; | ||
2546 | nesqp->cm_id = NULL; | ||
2547 | if (nesqp->flush_issued == 0) { | ||
2548 | nesqp->flush_issued = 1; | ||
2549 | issue_flush = 1; | ||
2550 | } | ||
2551 | } | ||
2552 | |||
2553 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2520 | 2554 | ||
2521 | nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state); | 2555 | if ((issue_flush) && (nesqp->destroyed == 0)) { |
2556 | /* Flush the queue(s) */ | ||
2557 | if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE) | ||
2558 | flush_q |= NES_CQP_FLUSH_SQ; | ||
2559 | flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1); | ||
2522 | 2560 | ||
2523 | if ((nesqp->cm_id) && (cm_id->event_handler)) { | 2561 | if (nesqp->term_flags) { |
2524 | if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || | 2562 | ibevent.device = nesqp->ibqp.device; |
2525 | ((original_ibqp_state == IB_QPS_RTS) && | 2563 | ibevent.event = nesqp->terminate_eventtype; |
2526 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | 2564 | ibevent.element.qp = &nesqp->ibqp; |
2565 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2566 | } | ||
2567 | } | ||
2568 | |||
2569 | if ((cm_id) && (cm_id->event_handler)) { | ||
2570 | if (issue_disconn) { | ||
2527 | atomic_inc(&cm_disconnects); | 2571 | atomic_inc(&cm_disconnects); |
2528 | cm_event.event = IW_CM_EVENT_DISCONNECT; | 2572 | cm_event.event = IW_CM_EVENT_DISCONNECT; |
2529 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { | 2573 | cm_event.status = disconn_status; |
2530 | cm_event.status = IW_CM_EVENT_STATUS_RESET; | ||
2531 | nes_debug(NES_DBG_CM, "Generating a CM " | ||
2532 | "Disconnect Event (status reset) for " | ||
2533 | "QP%u, cm_id = %p. \n", | ||
2534 | nesqp->hwqp.qp_id, cm_id); | ||
2535 | } else | ||
2536 | cm_event.status = IW_CM_EVENT_STATUS_OK; | ||
2537 | |||
2538 | cm_event.local_addr = cm_id->local_addr; | 2574 | cm_event.local_addr = cm_id->local_addr; |
2539 | cm_event.remote_addr = cm_id->remote_addr; | 2575 | cm_event.remote_addr = cm_id->remote_addr; |
2540 | cm_event.private_data = NULL; | 2576 | cm_event.private_data = NULL; |
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2547 | nesqp->hwqp.sq_tail, cm_id, | 2583 | nesqp->hwqp.sq_tail, cm_id, |
2548 | atomic_read(&nesqp->refcount)); | 2584 | atomic_read(&nesqp->refcount)); |
2549 | 2585 | ||
2550 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2551 | ret = cm_id->event_handler(cm_id, &cm_event); | 2586 | ret = cm_id->event_handler(cm_id, &cm_event); |
2552 | if (ret) | 2587 | if (ret) |
2553 | nes_debug(NES_DBG_CM, "OFA CM event_handler " | 2588 | nes_debug(NES_DBG_CM, "OFA CM event_handler " |
2554 | "returned, ret=%d\n", ret); | 2589 | "returned, ret=%d\n", ret); |
2555 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2556 | } | 2590 | } |
2557 | 2591 | ||
2558 | nesqp->disconn_pending = 0; | 2592 | if (issue_close) { |
2559 | /* There might have been another AE while the lock was released */ | ||
2560 | original_hw_tcp_state = nesqp->hw_tcp_state; | ||
2561 | original_ibqp_state = nesqp->ibqp_state; | ||
2562 | last_ae = nesqp->last_aeq; | ||
2563 | |||
2564 | if ((issued_disconnect_reset == 0) && (nesqp->cm_id) && | ||
2565 | ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | ||
2566 | (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) || | ||
2567 | (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) || | ||
2568 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2569 | atomic_inc(&cm_closes); | 2593 | atomic_inc(&cm_closes); |
2570 | nesqp->cm_id = NULL; | ||
2571 | nesqp->in_disconnect = 0; | ||
2572 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2573 | nes_disconnect(nesqp, 1); | 2594 | nes_disconnect(nesqp, 1); |
2574 | 2595 | ||
2575 | cm_id->provider_data = nesqp; | 2596 | cm_id->provider_data = nesqp; |
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2588 | } | 2609 | } |
2589 | 2610 | ||
2590 | cm_id->rem_ref(cm_id); | 2611 | cm_id->rem_ref(cm_id); |
2591 | |||
2592 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2593 | if (nesqp->flush_issued == 0) { | ||
2594 | nesqp->flush_issued = 1; | ||
2595 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2596 | flush_wqes(nesvnic->nesdev, nesqp, | ||
2597 | NES_CQP_FLUSH_RQ, 1); | ||
2598 | } else | ||
2599 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2600 | } else { | ||
2601 | cm_id = nesqp->cm_id; | ||
2602 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2603 | /* check to see if the inbound reset beat the outbound reset */ | ||
2604 | if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) { | ||
2605 | nes_debug(NES_DBG_CM, "QP%u: Decing refcount " | ||
2606 | "due to inbound reset beating the " | ||
2607 | "outbound reset.\n", nesqp->hwqp.qp_id); | ||
2608 | } | ||
2609 | } | 2612 | } |
2610 | } else { | ||
2611 | nesqp->disconn_pending = 0; | ||
2612 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2613 | } | 2613 | } |
2614 | 2614 | ||
2615 | return 0; | 2615 | return 0; |
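nes_cm_disconn_true() now decides what to do while holding the QP lock and performs the upcalls and flushes only after dropping it, which removes the old lock/unlock churn around the event handler. A compressed, illustrative view of the decision step (the names and constants mirror the hunks above; the condition grouping is slightly simplified, e.g. clearing cm_id is omitted):

	spin_lock_irqsave(&nesqp->lock, flags);

	/* disconnect upcall: terminate in flight, half-closed TCP connection,
	 * or a reset that arrived while the QP was still in RTS */
	issue_disconn = nesqp->term_flags ||
			original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT ||
			(original_ibqp_state == IB_QPS_RTS &&
			 last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET);

	/* close upcall: terminate in flight or the connection is already gone */
	issue_close = nesqp->term_flags ||
		      original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED ||
		      original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT ||
		      last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE ||
		      last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET;

	/* flush the work queues at most once per QP */
	issue_flush = issue_close && !nesqp->flush_issued;
	if (issue_flush)
		nesqp->flush_issued = 1;

	spin_unlock_irqrestore(&nesqp->lock, flags);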
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 8b7e7c0e496e..90e8e4d8a5ce 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -410,8 +410,6 @@ struct nes_cm_ops { | |||
410 | int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, | 410 | int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, |
411 | enum nes_timer_type, int, int); | 411 | enum nes_timer_type, int, int); |
412 | 412 | ||
413 | int nes_cm_disconn(struct nes_qp *); | ||
414 | |||
415 | int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); | 413 | int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); |
416 | int nes_reject(struct iw_cm_id *, const void *, u8); | 414 | int nes_reject(struct iw_cm_id *, const void *, u8); |
417 | int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); | 415 | int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 97d4c2a33ed6..3512d6de3019 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
74 | static void process_critical_error(struct nes_device *nesdev); | 74 | static void process_critical_error(struct nes_device *nesdev); |
75 | static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); | 75 | static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); |
76 | static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); | 76 | static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); |
77 | static void nes_terminate_timeout(unsigned long context); | ||
78 | static void nes_terminate_start_timer(struct nes_qp *nesqp); | ||
77 | 79 | ||
78 | #ifdef CONFIG_INFINIBAND_NES_DEBUG | 80 | #ifdef CONFIG_INFINIBAND_NES_DEBUG |
79 | static unsigned char *nes_iwarp_state_str[] = { | 81 | static unsigned char *nes_iwarp_state_str[] = { |
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
2903 | } | 2905 | } |
2904 | 2906 | ||
2905 | 2907 | ||
2908 | static u8 *locate_mpa(u8 *pkt, u32 aeq_info) | ||
2909 | { | ||
2910 | u16 pkt_len; | ||
2911 | |||
2912 | if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { | ||
2913 | /* skip over ethernet header */ | ||
2914 | pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2)); | ||
2915 | pkt += ETH_HLEN; | ||
2916 | |||
2917 | /* Skip over IP and TCP headers */ | ||
2918 | pkt += 4 * (pkt[0] & 0x0f); | ||
2919 | pkt += 4 * ((pkt[12] >> 4) & 0x0f); | ||
2920 | } | ||
2921 | return pkt; | ||
2922 | } | ||
2923 | |||
2924 | /* Determine if incoming error pkt is rdma layer */ | ||
2925 | static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info) | ||
2926 | { | ||
2927 | u8 *pkt; | ||
2928 | u16 *mpa; | ||
2929 | u32 opcode = 0xffffffff; | ||
2930 | |||
2931 | if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { | ||
2932 | pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; | ||
2933 | mpa = (u16 *)locate_mpa(pkt, aeq_info); | ||
2934 | opcode = be16_to_cpu(mpa[1]) & 0xf; | ||
2935 | } | ||
2936 | |||
2937 | return opcode; | ||
2938 | } | ||
2939 | |||
2940 | /* Build iWARP terminate header */ | ||
2941 | static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info) | ||
2942 | { | ||
2943 | u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; | ||
2944 | u16 ddp_seg_len; | ||
2945 | int copy_len = 0; | ||
2946 | u8 is_tagged = 0; | ||
2947 | u8 flush_code = 0; | ||
2948 | struct nes_terminate_hdr *termhdr; | ||
2949 | |||
2950 | termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase; | ||
2951 | memset(termhdr, 0, 64); | ||
2952 | |||
2953 | if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { | ||
2954 | |||
2955 | /* Use data from offending packet to fill in ddp & rdma hdrs */ | ||
2956 | pkt = locate_mpa(pkt, aeq_info); | ||
2957 | ddp_seg_len = be16_to_cpu(*(u16 *)pkt); | ||
2958 | if (ddp_seg_len) { | ||
2959 | copy_len = 2; | ||
2960 | termhdr->hdrct = DDP_LEN_FLAG; | ||
2961 | if (pkt[2] & 0x80) { | ||
2962 | is_tagged = 1; | ||
2963 | if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { | ||
2964 | copy_len += TERM_DDP_LEN_TAGGED; | ||
2965 | termhdr->hdrct |= DDP_HDR_FLAG; | ||
2966 | } | ||
2967 | } else { | ||
2968 | if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { | ||
2969 | copy_len += TERM_DDP_LEN_UNTAGGED; | ||
2970 | termhdr->hdrct |= DDP_HDR_FLAG; | ||
2971 | } | ||
2972 | |||
2973 | if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) { | ||
2974 | if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) { | ||
2975 | copy_len += TERM_RDMA_LEN; | ||
2976 | termhdr->hdrct |= RDMA_HDR_FLAG; | ||
2977 | } | ||
2978 | } | ||
2979 | } | ||
2980 | } | ||
2981 | } | ||
2982 | |||
2983 | switch (async_event_id) { | ||
2984 | case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: | ||
2985 | switch (iwarp_opcode(nesqp, aeq_info)) { | ||
2986 | case IWARP_OPCODE_WRITE: | ||
2987 | flush_code = IB_WC_LOC_PROT_ERR; | ||
2988 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; | ||
2989 | termhdr->error_code = DDP_TAGGED_INV_STAG; | ||
2990 | break; | ||
2991 | default: | ||
2992 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
2993 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
2994 | termhdr->error_code = RDMAP_INV_STAG; | ||
2995 | } | ||
2996 | break; | ||
2997 | case NES_AEQE_AEID_AMP_INVALID_STAG: | ||
2998 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
2999 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3000 | termhdr->error_code = RDMAP_INV_STAG; | ||
3001 | break; | ||
3002 | case NES_AEQE_AEID_AMP_BAD_QP: | ||
3003 | flush_code = IB_WC_LOC_QP_OP_ERR; | ||
3004 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3005 | termhdr->error_code = DDP_UNTAGGED_INV_QN; | ||
3006 | break; | ||
3007 | case NES_AEQE_AEID_AMP_BAD_STAG_KEY: | ||
3008 | case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: | ||
3009 | switch (iwarp_opcode(nesqp, aeq_info)) { | ||
3010 | case IWARP_OPCODE_SEND_INV: | ||
3011 | case IWARP_OPCODE_SEND_SE_INV: | ||
3012 | flush_code = IB_WC_REM_OP_ERR; | ||
3013 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; | ||
3014 | termhdr->error_code = RDMAP_CANT_INV_STAG; | ||
3015 | break; | ||
3016 | default: | ||
3017 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
3018 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3019 | termhdr->error_code = RDMAP_INV_STAG; | ||
3020 | } | ||
3021 | break; | ||
3022 | case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: | ||
3023 | if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) { | ||
3024 | flush_code = IB_WC_LOC_PROT_ERR; | ||
3025 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; | ||
3026 | termhdr->error_code = DDP_TAGGED_BOUNDS; | ||
3027 | } else { | ||
3028 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
3029 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3030 | termhdr->error_code = RDMAP_INV_BOUNDS; | ||
3031 | } | ||
3032 | break; | ||
3033 | case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION: | ||
3034 | case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: | ||
3035 | case NES_AEQE_AEID_PRIV_OPERATION_DENIED: | ||
3036 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
3037 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3038 | termhdr->error_code = RDMAP_ACCESS; | ||
3039 | break; | ||
3040 | case NES_AEQE_AEID_AMP_TO_WRAP: | ||
3041 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
3042 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3043 | termhdr->error_code = RDMAP_TO_WRAP; | ||
3044 | break; | ||
3045 | case NES_AEQE_AEID_AMP_BAD_PD: | ||
3046 | switch (iwarp_opcode(nesqp, aeq_info)) { | ||
3047 | case IWARP_OPCODE_WRITE: | ||
3048 | flush_code = IB_WC_LOC_PROT_ERR; | ||
3049 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; | ||
3050 | termhdr->error_code = DDP_TAGGED_UNASSOC_STAG; | ||
3051 | break; | ||
3052 | case IWARP_OPCODE_SEND_INV: | ||
3053 | case IWARP_OPCODE_SEND_SE_INV: | ||
3054 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
3055 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3056 | termhdr->error_code = RDMAP_CANT_INV_STAG; | ||
3057 | break; | ||
3058 | default: | ||
3059 | flush_code = IB_WC_REM_ACCESS_ERR; | ||
3060 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; | ||
3061 | termhdr->error_code = RDMAP_UNASSOC_STAG; | ||
3062 | } | ||
3063 | break; | ||
3064 | case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: | ||
3065 | flush_code = IB_WC_LOC_LEN_ERR; | ||
3066 | termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP; | ||
3067 | termhdr->error_code = MPA_MARKER; | ||
3068 | break; | ||
3069 | case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: | ||
3070 | flush_code = IB_WC_GENERAL_ERR; | ||
3071 | termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP; | ||
3072 | termhdr->error_code = MPA_CRC; | ||
3073 | break; | ||
3074 | case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE: | ||
3075 | case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: | ||
3076 | flush_code = IB_WC_LOC_LEN_ERR; | ||
3077 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC; | ||
3078 | termhdr->error_code = DDP_CATASTROPHIC_LOCAL; | ||
3079 | break; | ||
3080 | case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC: | ||
3081 | case NES_AEQE_AEID_DDP_NO_L_BIT: | ||
3082 | flush_code = IB_WC_FATAL_ERR; | ||
3083 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC; | ||
3084 | termhdr->error_code = DDP_CATASTROPHIC_LOCAL; | ||
3085 | break; | ||
3086 | case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN: | ||
3087 | case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: | ||
3088 | flush_code = IB_WC_GENERAL_ERR; | ||
3089 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3090 | termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE; | ||
3091 | break; | ||
3092 | case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: | ||
3093 | flush_code = IB_WC_LOC_LEN_ERR; | ||
3094 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3095 | termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG; | ||
3096 | break; | ||
3097 | case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION: | ||
3098 | flush_code = IB_WC_GENERAL_ERR; | ||
3099 | if (is_tagged) { | ||
3100 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; | ||
3101 | termhdr->error_code = DDP_TAGGED_INV_DDP_VER; | ||
3102 | } else { | ||
3103 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3104 | termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER; | ||
3105 | } | ||
3106 | break; | ||
3107 | case NES_AEQE_AEID_DDP_UBE_INVALID_MO: | ||
3108 | flush_code = IB_WC_GENERAL_ERR; | ||
3109 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3110 | termhdr->error_code = DDP_UNTAGGED_INV_MO; | ||
3111 | break; | ||
3112 | case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: | ||
3113 | flush_code = IB_WC_REM_OP_ERR; | ||
3114 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3115 | termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF; | ||
3116 | break; | ||
3117 | case NES_AEQE_AEID_DDP_UBE_INVALID_QN: | ||
3118 | flush_code = IB_WC_GENERAL_ERR; | ||
3119 | termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; | ||
3120 | termhdr->error_code = DDP_UNTAGGED_INV_QN; | ||
3121 | break; | ||
3122 | case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION: | ||
3123 | flush_code = IB_WC_GENERAL_ERR; | ||
3124 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; | ||
3125 | termhdr->error_code = RDMAP_INV_RDMAP_VER; | ||
3126 | break; | ||
3127 | case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE: | ||
3128 | flush_code = IB_WC_LOC_QP_OP_ERR; | ||
3129 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; | ||
3130 | termhdr->error_code = RDMAP_UNEXPECTED_OP; | ||
3131 | break; | ||
3132 | default: | ||
3133 | flush_code = IB_WC_FATAL_ERR; | ||
3134 | termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; | ||
3135 | termhdr->error_code = RDMAP_UNSPECIFIED; | ||
3136 | break; | ||
3137 | } | ||
3138 | |||
3139 | if (copy_len) | ||
3140 | memcpy(termhdr + 1, pkt, copy_len); | ||
3141 | |||
3142 | if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) { | ||
3143 | if (aeq_info & NES_AEQE_SQ) | ||
3144 | nesqp->term_sq_flush_code = flush_code; | ||
3145 | else | ||
3146 | nesqp->term_rq_flush_code = flush_code; | ||
3147 | } | ||
3148 | |||
3149 | return sizeof(struct nes_terminate_hdr) + copy_len; | ||
3150 | } | ||
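The terminate header built above is four bytes: the iWARP layer goes in the high nibble of layer_etype, the layer-specific error type in the low nibble, error_code names the specific error, and hdrct records which pieces of the offending frame were appended. A tiny standalone example of the packing, with the values copied from the enums this patch adds to nes_hw.h:

#include <stdio.h>
#include <stdint.h>

enum { LAYER_RDMA = 0, LAYER_DDP = 1, LAYER_MPA = 2 };
enum { DDP_UNTAGGED_BUFFER = 2 };
enum { DDP_UNTAGGED_INV_QN = 0x01 };
enum { DDP_LEN_FLAG = 0x80, DDP_HDR_FLAG = 0x40 };

int main(void)
{
	/* e.g. an invalid queue number on an untagged DDP message */
	uint8_t layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;	/* 0x12 */
	uint8_t error_code  = DDP_UNTAGGED_INV_QN;			/* 0x01 */
	uint8_t hdrct       = DDP_LEN_FLAG | DDP_HDR_FLAG;		/* length + DDP hdr copied */

	printf("layer_etype=0x%02x error_code=0x%02x hdrct=0x%02x\n",
	       layer_etype, error_code, hdrct);
	return 0;
}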
3151 | |||
3152 | static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp, | ||
3153 | struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype) | ||
3154 | { | ||
3155 | u64 context; | ||
3156 | unsigned long flags; | ||
3157 | u32 aeq_info; | ||
3158 | u16 async_event_id; | ||
3159 | u8 tcp_state; | ||
3160 | u8 iwarp_state; | ||
3161 | u32 termlen = 0; | ||
3162 | u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE | | ||
3163 | NES_CQP_QP_TERM_DONT_SEND_FIN; | ||
3164 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
3165 | |||
3166 | if (nesqp->term_flags & NES_TERM_SENT) | ||
3167 | return; /* Sanity check */ | ||
3168 | |||
3169 | aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); | ||
3170 | tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; | ||
3171 | iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; | ||
3172 | async_event_id = (u16)aeq_info; | ||
3173 | |||
3174 | context = (unsigned long)nesadapter->qp_table[le32_to_cpu( | ||
3175 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; | ||
3176 | if (!context) { | ||
3177 | WARN_ON(!context); | ||
3178 | return; | ||
3179 | } | ||
3180 | |||
3181 | nesqp = (struct nes_qp *)(unsigned long)context; | ||
3182 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3183 | nesqp->hw_iwarp_state = iwarp_state; | ||
3184 | nesqp->hw_tcp_state = tcp_state; | ||
3185 | nesqp->last_aeq = async_event_id; | ||
3186 | nesqp->terminate_eventtype = eventtype; | ||
3187 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3188 | |||
3189 | if (nesadapter->send_term_ok) | ||
3190 | termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info); | ||
3191 | else | ||
3192 | mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG; | ||
3193 | |||
3194 | nes_terminate_start_timer(nesqp); | ||
3195 | nesqp->term_flags |= NES_TERM_SENT; | ||
3196 | nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); | ||
3197 | } | ||
3198 | |||
3199 | static void nes_terminate_send_fin(struct nes_device *nesdev, | ||
3200 | struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe) | ||
3201 | { | ||
3202 | u32 aeq_info; | ||
3203 | u16 async_event_id; | ||
3204 | u8 tcp_state; | ||
3205 | u8 iwarp_state; | ||
3206 | unsigned long flags; | ||
3207 | |||
3208 | aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); | ||
3209 | tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; | ||
3210 | iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; | ||
3211 | async_event_id = (u16)aeq_info; | ||
3212 | |||
3213 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3214 | nesqp->hw_iwarp_state = iwarp_state; | ||
3215 | nesqp->hw_tcp_state = tcp_state; | ||
3216 | nesqp->last_aeq = async_event_id; | ||
3217 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3218 | |||
3219 | /* Send the fin only */ | ||
3220 | nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE | | ||
3221 | NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0); | ||
3222 | } | ||
3223 | |||
3224 | /* Cleanup after a terminate sent or received */ | ||
3225 | static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred) | ||
3226 | { | ||
3227 | u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; | ||
3228 | unsigned long flags; | ||
3229 | struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device); | ||
3230 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3231 | u8 first_time = 0; | ||
3232 | |||
3233 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3234 | if (nesqp->hte_added) { | ||
3235 | nesqp->hte_added = 0; | ||
3236 | next_iwarp_state |= NES_CQP_QP_DEL_HTE; | ||
3237 | } | ||
3238 | |||
3239 | first_time = (nesqp->term_flags & NES_TERM_DONE) == 0; | ||
3240 | nesqp->term_flags |= NES_TERM_DONE; | ||
3241 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3242 | |||
3243 | /* Make sure we go through this only once */ | ||
3244 | if (first_time) { | ||
3245 | if (timeout_occurred == 0) | ||
3246 | del_timer(&nesqp->terminate_timer); | ||
3247 | else | ||
3248 | next_iwarp_state |= NES_CQP_QP_RESET; | ||
3249 | |||
3250 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); | ||
3251 | nes_cm_disconn(nesqp); | ||
3252 | } | ||
3253 | } | ||
3254 | |||
3255 | static void nes_terminate_received(struct nes_device *nesdev, | ||
3256 | struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe) | ||
3257 | { | ||
3258 | u32 aeq_info; | ||
3259 | u8 *pkt; | ||
3260 | u32 *mpa; | ||
3261 | u8 ddp_ctl; | ||
3262 | u8 rdma_ctl; | ||
3263 | u16 aeq_id = 0; | ||
3264 | |||
3265 | aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); | ||
3266 | if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { | ||
3267 | /* Terminate is not a performance path so the silicon */ | ||
3268 | /* did not validate the frame - do it now */ | ||
3269 | pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; | ||
3270 | mpa = (u32 *)locate_mpa(pkt, aeq_info); | ||
3271 | ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff; | ||
3272 | rdma_ctl = be32_to_cpu(mpa[0]) & 0xff; | ||
3273 | if ((ddp_ctl & 0xc0) != 0x40) | ||
3274 | aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC; | ||
3275 | else if ((ddp_ctl & 0x03) != 1) | ||
3276 | aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION; | ||
3277 | else if (be32_to_cpu(mpa[2]) != 2) | ||
3278 | aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN; | ||
3279 | else if (be32_to_cpu(mpa[3]) != 1) | ||
3280 | aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN; | ||
3281 | else if (be32_to_cpu(mpa[4]) != 0) | ||
3282 | aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO; | ||
3283 | else if ((rdma_ctl & 0xc0) != 0x40) | ||
3284 | aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION; | ||
3285 | |||
3286 | if (aeq_id) { | ||
3287 | /* Bad terminate recvd - send back a terminate */ | ||
3288 | aeq_info = (aeq_info & 0xffff0000) | aeq_id; | ||
3289 | aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info); | ||
3290 | nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); | ||
3291 | return; | ||
3292 | } | ||
3293 | } | ||
3294 | |||
3295 | nesqp->term_flags |= NES_TERM_RCVD; | ||
3296 | nesqp->terminate_eventtype = IB_EVENT_QP_FATAL; | ||
3297 | nes_terminate_start_timer(nesqp); | ||
3298 | nes_terminate_send_fin(nesdev, nesqp, aeqe); | ||
3299 | } | ||
3300 | |||
3301 | /* Timeout routine in case terminate fails to complete */ | ||
3302 | static void nes_terminate_timeout(unsigned long context) | ||
3303 | { | ||
3304 | struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context; | ||
3305 | |||
3306 | nes_terminate_done(nesqp, 1); | ||
3307 | } | ||
3308 | |||
3309 | /* Set a timer in case hw cannot complete the terminate sequence */ | ||
3310 | static void nes_terminate_start_timer(struct nes_qp *nesqp) | ||
3311 | { | ||
3312 | init_timer(&nesqp->terminate_timer); | ||
3313 | nesqp->terminate_timer.function = nes_terminate_timeout; | ||
3314 | nesqp->terminate_timer.expires = jiffies + HZ; | ||
3315 | nesqp->terminate_timer.data = (unsigned long)nesqp; | ||
3316 | add_timer(&nesqp->terminate_timer); | ||
3317 | } | ||
3318 | |||
2906 | /** | 3319 | /** |
2907 | * nes_process_iwarp_aeqe | 3320 | * nes_process_iwarp_aeqe |
2908 | */ | 3321 | */ |
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
2910 | struct nes_hw_aeqe *aeqe) | 3323 | struct nes_hw_aeqe *aeqe) |
2911 | { | 3324 | { |
2912 | u64 context; | 3325 | u64 context; |
2913 | u64 aeqe_context = 0; | ||
2914 | unsigned long flags; | 3326 | unsigned long flags; |
2915 | struct nes_qp *nesqp; | 3327 | struct nes_qp *nesqp; |
3328 | struct nes_hw_cq *hw_cq; | ||
3329 | struct nes_cq *nescq; | ||
2916 | int resource_allocated; | 3330 | int resource_allocated; |
2917 | /* struct iw_cm_id *cm_id; */ | ||
2918 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 3331 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
2919 | struct ib_event ibevent; | ||
2920 | /* struct iw_cm_event cm_event; */ | ||
2921 | u32 aeq_info; | 3332 | u32 aeq_info; |
2922 | u32 next_iwarp_state = 0; | 3333 | u32 next_iwarp_state = 0; |
2923 | u16 async_event_id; | 3334 | u16 async_event_id; |
2924 | u8 tcp_state; | 3335 | u8 tcp_state; |
2925 | u8 iwarp_state; | 3336 | u8 iwarp_state; |
3337 | int must_disconn = 1; | ||
3338 | int must_terminate = 0; | ||
3339 | struct ib_event ibevent; | ||
2926 | 3340 | ||
2927 | nes_debug(NES_DBG_AEQ, "\n"); | 3341 | nes_debug(NES_DBG_AEQ, "\n"); |
2928 | aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); | 3342 | aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); |
2929 | if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { | 3343 | if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) { |
2930 | context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); | 3344 | context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); |
2931 | context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; | 3345 | context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; |
2932 | } else { | 3346 | } else { |
2933 | aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); | ||
2934 | aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; | ||
2935 | context = (unsigned long)nesadapter->qp_table[le32_to_cpu( | 3347 | context = (unsigned long)nesadapter->qp_table[le32_to_cpu( |
2936 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; | 3348 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; |
2937 | BUG_ON(!context); | 3349 | BUG_ON(!context); |
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
2948 | 3360 | ||
2949 | switch (async_event_id) { | 3361 | switch (async_event_id) { |
2950 | case NES_AEQE_AEID_LLP_FIN_RECEIVED: | 3362 | case NES_AEQE_AEID_LLP_FIN_RECEIVED: |
2951 | nesqp = *((struct nes_qp **)&context); | 3363 | nesqp = (struct nes_qp *)(unsigned long)context; |
3364 | |||
3365 | if (nesqp->term_flags) | ||
3366 | return; /* Ignore it, wait for close complete */ | ||
3367 | |||
2952 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { | 3368 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { |
2953 | nesqp->cm_id->add_ref(nesqp->cm_id); | 3369 | nesqp->cm_id->add_ref(nesqp->cm_id); |
2954 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, | 3370 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, |
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
2959 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | 3375 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), |
2960 | async_event_id, nesqp->last_aeq, tcp_state); | 3376 | async_event_id, nesqp->last_aeq, tcp_state); |
2961 | } | 3377 | } |
3378 | |||
2962 | if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || | 3379 | if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || |
2963 | (nesqp->ibqp_state != IB_QPS_RTS)) { | 3380 | (nesqp->ibqp_state != IB_QPS_RTS)) { |
2964 | /* FIN Received but tcp state or IB state moved on, | 3381 | /* FIN Received but tcp state or IB state moved on, |
2965 | should expect a close complete */ | 3382 | should expect a close complete */ |
2966 | return; | 3383 | return; |
2967 | } | 3384 | } |
3385 | |||
2968 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: | 3386 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: |
3387 | nesqp = (struct nes_qp *)(unsigned long)context; | ||
3388 | if (nesqp->term_flags) { | ||
3389 | nes_terminate_done(nesqp, 0); | ||
3390 | return; | ||
3391 | } | ||
3392 | |||
2969 | case NES_AEQE_AEID_LLP_CONNECTION_RESET: | 3393 | case NES_AEQE_AEID_LLP_CONNECTION_RESET: |
2970 | case NES_AEQE_AEID_TERMINATE_SENT: | ||
2971 | case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE: | ||
2972 | case NES_AEQE_AEID_RESET_SENT: | 3394 | case NES_AEQE_AEID_RESET_SENT: |
2973 | nesqp = *((struct nes_qp **)&context); | 3395 | nesqp = (struct nes_qp *)(unsigned long)context; |
2974 | if (async_event_id == NES_AEQE_AEID_RESET_SENT) { | 3396 | if (async_event_id == NES_AEQE_AEID_RESET_SENT) { |
2975 | tcp_state = NES_AEQE_TCP_STATE_CLOSED; | 3397 | tcp_state = NES_AEQE_TCP_STATE_CLOSED; |
2976 | } | 3398 | } |
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
2982 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | 3404 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || |
2983 | (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { | 3405 | (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { |
2984 | nesqp->hte_added = 0; | 3406 | nesqp->hte_added = 0; |
2985 | spin_unlock_irqrestore(&nesqp->lock, flags); | 3407 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE; |
2986 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n", | ||
2987 | nesqp->hwqp.qp_id); | ||
2988 | nes_hw_modify_qp(nesdev, nesqp, | ||
2989 | NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0); | ||
2990 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2991 | } | 3408 | } |
2992 | 3409 | ||
2993 | if ((nesqp->ibqp_state == IB_QPS_RTS) && | 3410 | if ((nesqp->ibqp_state == IB_QPS_RTS) && |
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
2999 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; | 3416 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; |
3000 | break; | 3417 | break; |
3001 | case NES_AEQE_IWARP_STATE_TERMINATE: | 3418 | case NES_AEQE_IWARP_STATE_TERMINATE: |
3002 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; | 3419 | must_disconn = 0; /* terminate path takes care of disconn */ |
3003 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; | 3420 | if (nesqp->term_flags == 0) |
3004 | if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { | 3421 | must_terminate = 1; |
3005 | next_iwarp_state |= 0x02000000; | ||
3006 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
3007 | } | ||
3008 | break; | 3422 | break; |
3009 | default: | ||
3010 | next_iwarp_state = 0; | ||
3011 | } | ||
3012 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3013 | if (next_iwarp_state) { | ||
3014 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X," | ||
3015 | " also added another reference\n", | ||
3016 | nesqp->hwqp.qp_id, next_iwarp_state); | ||
3017 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); | ||
3018 | } | 3423 | } |
3019 | nes_cm_disconn(nesqp); | ||
3020 | } else { | 3424 | } else { |
3021 | if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { | 3425 | if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { |
3022 | /* FIN Received but ib state not RTS, | 3426 | /* FIN Received but ib state not RTS, |
3023 | close complete will be on its way */ | 3427 | close complete will be on its way */ |
3024 | spin_unlock_irqrestore(&nesqp->lock, flags); | 3428 | must_disconn = 0; |
3025 | return; | ||
3026 | } | ||
3027 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3028 | if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { | ||
3029 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000; | ||
3030 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
3031 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X," | ||
3032 | " also added another reference\n", | ||
3033 | nesqp->hwqp.qp_id, next_iwarp_state); | ||
3034 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); | ||
3035 | } | 3429 | } |
3036 | nes_cm_disconn(nesqp); | ||
3037 | } | 3430 | } |
3038 | break; | ||
3039 | case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: | ||
3040 | nesqp = *((struct nes_qp **)&context); | ||
3041 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3042 | nesqp->hw_iwarp_state = iwarp_state; | ||
3043 | nesqp->hw_tcp_state = tcp_state; | ||
3044 | nesqp->last_aeq = async_event_id; | ||
3045 | spin_unlock_irqrestore(&nesqp->lock, flags); | 3431 | spin_unlock_irqrestore(&nesqp->lock, flags); |
3046 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED" | 3432 | |
3047 | " event on QP%u \n Q2 Data:\n", | 3433 | if (must_terminate) |
3048 | nesqp->hwqp.qp_id); | 3434 | nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); |
3049 | if (nesqp->ibqp.event_handler) { | 3435 | else if (must_disconn) { |
3050 | ibevent.device = nesqp->ibqp.device; | 3436 | if (next_iwarp_state) { |
3051 | ibevent.element.qp = &nesqp->ibqp; | 3437 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n", |
3052 | ibevent.event = IB_EVENT_QP_FATAL; | 3438 | nesqp->hwqp.qp_id, next_iwarp_state); |
3053 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | 3439 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); |
3054 | } | 3440 | } |
3055 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || | ||
3056 | ((nesqp->ibqp_state == IB_QPS_RTS)&& | ||
3057 | (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
3058 | nes_cm_disconn(nesqp); | 3441 | nes_cm_disconn(nesqp); |
3059 | } else { | ||
3060 | nesqp->in_disconnect = 0; | ||
3061 | wake_up(&nesqp->kick_waitq); | ||
3062 | } | 3442 | } |
3063 | break; | 3443 | break; |
3064 | case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: | 3444 | |
3065 | nesqp = *((struct nes_qp **)&context); | 3445 | case NES_AEQE_AEID_TERMINATE_SENT: |
3066 | spin_lock_irqsave(&nesqp->lock, flags); | 3446 | nesqp = (struct nes_qp *)(unsigned long)context; |
3067 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; | 3447 | nes_terminate_send_fin(nesdev, nesqp, aeqe); |
3068 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
3069 | nesqp->last_aeq = async_event_id; | ||
3070 | if (nesqp->cm_id) { | ||
3071 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES" | ||
3072 | " event on QP%u, remote IP = 0x%08X \n", | ||
3073 | nesqp->hwqp.qp_id, | ||
3074 | ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr)); | ||
3075 | } else { | ||
3076 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES" | ||
3077 | " event on QP%u \n", | ||
3078 | nesqp->hwqp.qp_id); | ||
3079 | } | ||
3080 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3081 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET; | ||
3082 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); | ||
3083 | if (nesqp->ibqp.event_handler) { | ||
3084 | ibevent.device = nesqp->ibqp.device; | ||
3085 | ibevent.element.qp = &nesqp->ibqp; | ||
3086 | ibevent.event = IB_EVENT_QP_FATAL; | ||
3087 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
3088 | } | ||
3089 | break; | 3448 | break; |
3090 | case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: | 3449 | |
3091 | if (NES_AEQE_INBOUND_RDMA&aeq_info) { | 3450 | case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: |
3092 | nesqp = nesadapter->qp_table[le32_to_cpu( | 3451 | nesqp = (struct nes_qp *)(unsigned long)context; |
3093 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | 3452 | nes_terminate_received(nesdev, nesqp, aeqe); |
3094 | } else { | ||
3095 | /* TODO: get the actual WQE and mask off wqe index */ | ||
3096 | context &= ~((u64)511); | ||
3097 | nesqp = *((struct nes_qp **)&context); | ||
3098 | } | ||
3099 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3100 | nesqp->hw_iwarp_state = iwarp_state; | ||
3101 | nesqp->hw_tcp_state = tcp_state; | ||
3102 | nesqp->last_aeq = async_event_id; | ||
3103 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3104 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n", | ||
3105 | nesqp->hwqp.qp_id); | ||
3106 | if (nesqp->ibqp.event_handler) { | ||
3107 | ibevent.device = nesqp->ibqp.device; | ||
3108 | ibevent.element.qp = &nesqp->ibqp; | ||
3109 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
3110 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
3111 | } | ||
3112 | break; | 3453 | break; |
3454 | |||
3455 | case NES_AEQE_AEID_AMP_BAD_STAG_KEY: | ||
3456 | case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: | ||
3113 | case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: | 3457 | case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: |
3114 | nesqp = *((struct nes_qp **)&context); | 3458 | case NES_AEQE_AEID_AMP_INVALID_STAG: |
3115 | spin_lock_irqsave(&nesqp->lock, flags); | 3459 | case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION: |
3116 | nesqp->hw_iwarp_state = iwarp_state; | 3460 | case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: |
3117 | nesqp->hw_tcp_state = tcp_state; | ||
3118 | nesqp->last_aeq = async_event_id; | ||
3119 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3120 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n", | ||
3121 | nesqp->hwqp.qp_id); | ||
3122 | if (nesqp->ibqp.event_handler) { | ||
3123 | ibevent.device = nesqp->ibqp.device; | ||
3124 | ibevent.element.qp = &nesqp->ibqp; | ||
3125 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
3126 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
3127 | } | ||
3128 | break; | ||
3129 | case NES_AEQE_AEID_PRIV_OPERATION_DENIED: | 3461 | case NES_AEQE_AEID_PRIV_OPERATION_DENIED: |
3130 | nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words | 3462 | case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: |
3131 | [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | 3463 | case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: |
3132 | spin_lock_irqsave(&nesqp->lock, flags); | 3464 | case NES_AEQE_AEID_AMP_TO_WRAP: |
3133 | nesqp->hw_iwarp_state = iwarp_state; | 3465 | nesqp = (struct nes_qp *)(unsigned long)context; |
3134 | nesqp->hw_tcp_state = tcp_state; | 3466 | nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR); |
3135 | nesqp->last_aeq = async_event_id; | 3467 | break; |
3136 | spin_unlock_irqrestore(&nesqp->lock, flags); | 3468 | |
3137 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u," | 3469 | case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE: |
3138 | " nesqp = %p, AE reported %p\n", | 3470 | case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: |
3139 | nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context)); | 3471 | case NES_AEQE_AEID_DDP_UBE_INVALID_MO: |
3140 | if (nesqp->ibqp.event_handler) { | 3472 | case NES_AEQE_AEID_DDP_UBE_INVALID_QN: |
3141 | ibevent.device = nesqp->ibqp.device; | 3473 | nesqp = (struct nes_qp *)(unsigned long)context; |
3142 | ibevent.element.qp = &nesqp->ibqp; | 3474 | if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) { |
3143 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | 3475 | aeq_info &= 0xffff0000; |
3144 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | 3476 | aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE; |
3477 | aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info); | ||
3145 | } | 3478 | } |
3479 | |||
3480 | case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE: | ||
3481 | case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: | ||
3482 | case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: | ||
3483 | case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: | ||
3484 | case NES_AEQE_AEID_AMP_BAD_QP: | ||
3485 | case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: | ||
3486 | case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC: | ||
3487 | case NES_AEQE_AEID_DDP_NO_L_BIT: | ||
3488 | case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN: | ||
3489 | case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: | ||
3490 | case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION: | ||
3491 | case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION: | ||
3492 | case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE: | ||
3493 | case NES_AEQE_AEID_AMP_BAD_PD: | ||
3494 | case NES_AEQE_AEID_AMP_FASTREG_SHARED: | ||
3495 | case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG: | ||
3496 | case NES_AEQE_AEID_AMP_FASTREG_MW_STAG: | ||
3497 | case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS: | ||
3498 | case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW: | ||
3499 | case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH: | ||
3500 | case NES_AEQE_AEID_AMP_INVALIDATE_SHARED: | ||
3501 | case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS: | ||
3502 | case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG: | ||
3503 | case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG: | ||
3504 | case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG: | ||
3505 | case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG: | ||
3506 | case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS: | ||
3507 | case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS: | ||
3508 | case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT: | ||
3509 | case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED: | ||
3510 | case NES_AEQE_AEID_BAD_CLOSE: | ||
3511 | case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO: | ||
3512 | case NES_AEQE_AEID_STAG_ZERO_INVALID: | ||
3513 | case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST: | ||
3514 | case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: | ||
3515 | nesqp = (struct nes_qp *)(unsigned long)context; | ||
3516 | nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); | ||
3146 | break; | 3517 | break; |
3518 | |||
3147 | case NES_AEQE_AEID_CQ_OPERATION_ERROR: | 3519 | case NES_AEQE_AEID_CQ_OPERATION_ERROR: |
3148 | context <<= 1; | 3520 | context <<= 1; |
3149 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", | 3521 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", |
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3153 | if (resource_allocated) { | 3525 | if (resource_allocated) { |
3154 | printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", | 3526 | printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", |
3155 | __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); | 3527 | __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); |
3528 | hw_cq = (struct nes_hw_cq *)(unsigned long)context; | ||
3529 | if (hw_cq) { | ||
3530 | nescq = container_of(hw_cq, struct nes_cq, hw_cq); | ||
3531 | if (nescq->ibcq.event_handler) { | ||
3532 | ibevent.device = nescq->ibcq.device; | ||
3533 | ibevent.event = IB_EVENT_CQ_ERR; | ||
3534 | ibevent.element.cq = &nescq->ibcq; | ||
3535 | nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context); | ||
3536 | } | ||
3537 | } | ||
3156 | } | 3538 | } |
3157 | break; | 3539 | break; |
3158 | case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: | 3540 | |
3159 | nesqp = nesadapter->qp_table[le32_to_cpu( | ||
3160 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | ||
3161 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3162 | nesqp->hw_iwarp_state = iwarp_state; | ||
3163 | nesqp->hw_tcp_state = tcp_state; | ||
3164 | nesqp->last_aeq = async_event_id; | ||
3165 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3166 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG" | ||
3167 | "_FOR_AVAILABLE_BUFFER event on QP%u\n", | ||
3168 | nesqp->hwqp.qp_id); | ||
3169 | if (nesqp->ibqp.event_handler) { | ||
3170 | ibevent.device = nesqp->ibqp.device; | ||
3171 | ibevent.element.qp = &nesqp->ibqp; | ||
3172 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
3173 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
3174 | } | ||
3175 | /* tell cm to disconnect, cm will queue work to thread */ | ||
3176 | nes_cm_disconn(nesqp); | ||
3177 | break; | ||
3178 | case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: | ||
3179 | nesqp = *((struct nes_qp **)&context); | ||
3180 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3181 | nesqp->hw_iwarp_state = iwarp_state; | ||
3182 | nesqp->hw_tcp_state = tcp_state; | ||
3183 | nesqp->last_aeq = async_event_id; | ||
3184 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3185 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN" | ||
3186 | "_NO_BUFFER_AVAILABLE event on QP%u\n", | ||
3187 | nesqp->hwqp.qp_id); | ||
3188 | if (nesqp->ibqp.event_handler) { | ||
3189 | ibevent.device = nesqp->ibqp.device; | ||
3190 | ibevent.element.qp = &nesqp->ibqp; | ||
3191 | ibevent.event = IB_EVENT_QP_FATAL; | ||
3192 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
3193 | } | ||
3194 | /* tell cm to disconnect, cm will queue work to thread */ | ||
3195 | nes_cm_disconn(nesqp); | ||
3196 | break; | ||
3197 | case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: | ||
3198 | nesqp = *((struct nes_qp **)&context); | ||
3199 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3200 | nesqp->hw_iwarp_state = iwarp_state; | ||
3201 | nesqp->hw_tcp_state = tcp_state; | ||
3202 | nesqp->last_aeq = async_event_id; | ||
3203 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3204 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR" | ||
3205 | " event on QP%u \n Q2 Data:\n", | ||
3206 | nesqp->hwqp.qp_id); | ||
3207 | if (nesqp->ibqp.event_handler) { | ||
3208 | ibevent.device = nesqp->ibqp.device; | ||
3209 | ibevent.element.qp = &nesqp->ibqp; | ||
3210 | ibevent.event = IB_EVENT_QP_FATAL; | ||
3211 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
3212 | } | ||
3213 | /* tell cm to disconnect, cm will queue work to thread */ | ||
3214 | nes_cm_disconn(nesqp); | ||
3215 | break; | ||
3216 | /* TODO: additional AEs need to be here */ | ||
3217 | case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: | ||
3218 | nesqp = *((struct nes_qp **)&context); | ||
3219 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3220 | nesqp->hw_iwarp_state = iwarp_state; | ||
3221 | nesqp->hw_tcp_state = tcp_state; | ||
3222 | nesqp->last_aeq = async_event_id; | ||
3223 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3224 | if (nesqp->ibqp.event_handler) { | ||
3225 | ibevent.device = nesqp->ibqp.device; | ||
3226 | ibevent.element.qp = &nesqp->ibqp; | ||
3227 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
3228 | nesqp->ibqp.event_handler(&ibevent, | ||
3229 | nesqp->ibqp.qp_context); | ||
3230 | } | ||
3231 | nes_cm_disconn(nesqp); | ||
3232 | break; | ||
3233 | default: | 3541 | default: |
3234 | nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", | 3542 | nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", |
3235 | async_event_id); | 3543 | async_event_id); |
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3238 | 3546 | ||
3239 | } | 3547 | } |
3240 | 3548 | ||
3241 | |||
3242 | /** | 3549 | /** |
3243 | * nes_iwarp_ce_handler | 3550 | * nes_iwarp_ce_handler |
3244 | */ | 3551 | */ |
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp, | |||
3373 | { | 3680 | { |
3374 | struct nes_cqp_request *cqp_request; | 3681 | struct nes_cqp_request *cqp_request; |
3375 | struct nes_hw_cqp_wqe *cqp_wqe; | 3682 | struct nes_hw_cqp_wqe *cqp_wqe; |
3683 | u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH; | ||
3684 | u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH; | ||
3376 | int ret; | 3685 | int ret; |
3377 | 3686 | ||
3378 | cqp_request = nes_get_cqp_request(nesdev); | 3687 | cqp_request = nes_get_cqp_request(nesdev); |
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp, | |||
3389 | cqp_wqe = &cqp_request->cqp_wqe; | 3698 | cqp_wqe = &cqp_request->cqp_wqe; |
3390 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | 3699 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); |
3391 | 3700 | ||
3701 | /* If wqe in error was identified, set code to be put into cqe */ | ||
3702 | if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) { | ||
3703 | which_wq |= NES_CQP_FLUSH_MAJ_MIN; | ||
3704 | sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code; | ||
3705 | nesqp->term_sq_flush_code = 0; | ||
3706 | } | ||
3707 | |||
3708 | if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) { | ||
3709 | which_wq |= NES_CQP_FLUSH_MAJ_MIN; | ||
3710 | rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code; | ||
3711 | nesqp->term_rq_flush_code = 0; | ||
3712 | } | ||
3713 | |||
3714 | if (which_wq & NES_CQP_FLUSH_MAJ_MIN) { | ||
3715 | cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code); | ||
3716 | cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code); | ||
3717 | } | ||
3718 | |||
3392 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = | 3719 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = |
3393 | cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); | 3720 | cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); |
3394 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); | 3721 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); |
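flush_wqes() now lets a terminate decide the completion status seen by the consumer: each flush code word carries a 16-bit major code in the upper half and a 16-bit minor code in the lower half, and a stored term_sq_flush_code / term_rq_flush_code replaces the generic FLUSH/FLUSH pair with the driver-defined major code plus an IB work-completion status. A standalone illustration of the packing (constants mirror the values added in nes_hw.h; the status value is illustrative only):

#include <stdio.h>
#include <stdint.h>

#define NES_IWARP_CQE_MAJOR_FLUSH 1u
#define NES_IWARP_CQE_MINOR_FLUSH 1u
#define CQE_MAJOR_DRV             0x8000u

int main(void)
{
	/* default: generic flush major/minor */
	uint32_t sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;

	/* a pending terminate-derived status (an IB_WC_* value) overrides it */
	uint32_t term_sq_flush_code = 4;	/* illustrative value only */
	if (term_sq_flush_code)
		sq_code = (CQE_MAJOR_DRV << 16) | term_sq_flush_code;

	printf("major=0x%04x minor=0x%04x\n",
	       (unsigned)(sq_code >> 16), (unsigned)(sq_code & 0xffff));
	return 0;
}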
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index c3654c6383fe..f28a41ba9fa1 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx { | |||
241 | }; | 241 | }; |
242 | 242 | ||
243 | #define NES_CQP_OP_IWARP_STATE_SHIFT 28 | 243 | #define NES_CQP_OP_IWARP_STATE_SHIFT 28 |
244 | #define NES_CQP_OP_TERMLEN_SHIFT 28 | ||
244 | 245 | ||
245 | enum nes_cqp_qp_bits { | 246 | enum nes_cqp_qp_bits { |
246 | NES_CQP_QP_ARP_VALID = (1<<8), | 247 | NES_CQP_QP_ARP_VALID = (1<<8), |
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits { | |||
265 | NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT), | 266 | NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT), |
266 | NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT), | 267 | NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT), |
267 | NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT), | 268 | NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT), |
269 | NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24), | ||
270 | NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25), | ||
268 | NES_CQP_QP_RESET = (1<<31), | 271 | NES_CQP_QP_RESET = (1<<31), |
269 | }; | 272 | }; |
270 | 273 | ||
271 | enum nes_cqp_qp_wqe_word_idx { | 274 | enum nes_cqp_qp_wqe_word_idx { |
272 | NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6, | 275 | NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6, |
273 | NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7, | 276 | NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7, |
277 | NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8, | ||
278 | NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9, | ||
274 | NES_CQP_QP_WQE_NEW_MSS_IDX = 15, | 279 | NES_CQP_QP_WQE_NEW_MSS_IDX = 15, |
275 | }; | 280 | }; |
276 | 281 | ||
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits { | |||
361 | enum nes_cqp_flush_bits { | 366 | enum nes_cqp_flush_bits { |
362 | NES_CQP_FLUSH_SQ = (1<<30), | 367 | NES_CQP_FLUSH_SQ = (1<<30), |
363 | NES_CQP_FLUSH_RQ = (1<<31), | 368 | NES_CQP_FLUSH_RQ = (1<<31), |
369 | NES_CQP_FLUSH_MAJ_MIN = (1<<28), | ||
364 | }; | 370 | }; |
365 | 371 | ||
366 | enum nes_cqe_opcode_bits { | 372 | enum nes_cqe_opcode_bits { |
@@ -633,11 +639,14 @@ enum nes_aeqe_bits { | |||
633 | NES_AEQE_INBOUND_RDMA = (1<<19), | 639 | NES_AEQE_INBOUND_RDMA = (1<<19), |
634 | NES_AEQE_IWARP_STATE_MASK = (7<<20), | 640 | NES_AEQE_IWARP_STATE_MASK = (7<<20), |
635 | NES_AEQE_TCP_STATE_MASK = (0xf<<24), | 641 | NES_AEQE_TCP_STATE_MASK = (0xf<<24), |
642 | NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28), | ||
636 | NES_AEQE_VALID = (1<<31), | 643 | NES_AEQE_VALID = (1<<31), |
637 | }; | 644 | }; |
638 | 645 | ||
639 | #define NES_AEQE_IWARP_STATE_SHIFT 20 | 646 | #define NES_AEQE_IWARP_STATE_SHIFT 20 |
640 | #define NES_AEQE_TCP_STATE_SHIFT 24 | 647 | #define NES_AEQE_TCP_STATE_SHIFT 24 |
648 | #define NES_AEQE_Q2_DATA_ETHERNET (1<<28) | ||
649 | #define NES_AEQE_Q2_DATA_MPA (1<<29) | ||
641 | 650 | ||
642 | enum nes_aeqe_iwarp_state { | 651 | enum nes_aeqe_iwarp_state { |
643 | NES_AEQE_IWARP_STATE_NON_EXISTANT = 0, | 652 | NES_AEQE_IWARP_STATE_NON_EXISTANT = 0, |
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits { | |||
751 | NES_IWARP_SQ_OP_NOP = 12, | 760 | NES_IWARP_SQ_OP_NOP = 12, |
752 | }; | 761 | }; |
753 | 762 | ||
763 | enum nes_iwarp_cqe_major_code { | ||
764 | NES_IWARP_CQE_MAJOR_FLUSH = 1, | ||
765 | NES_IWARP_CQE_MAJOR_DRV = 0x8000 | ||
766 | }; | ||
767 | |||
768 | enum nes_iwarp_cqe_minor_code { | ||
769 | NES_IWARP_CQE_MINOR_FLUSH = 1 | ||
770 | }; | ||
771 | |||
754 | #define NES_EEPROM_READ_REQUEST (1<<16) | 772 | #define NES_EEPROM_READ_REQUEST (1<<16) |
755 | #define NES_MAC_ADDR_VALID (1<<20) | 773 | #define NES_MAC_ADDR_VALID (1<<20) |
756 | 774 | ||
@@ -1119,6 +1137,7 @@ struct nes_adapter { | |||
1119 | u8 netdev_max; /* from host nic address count in EEPROM */ | 1137 | u8 netdev_max; /* from host nic address count in EEPROM */ |
1120 | u8 port_count; | 1138 | u8 port_count; |
1121 | u8 virtwq; | 1139 | u8 virtwq; |
1140 | u8 send_term_ok; | ||
1122 | u8 et_use_adaptive_rx_coalesce; | 1141 | u8 et_use_adaptive_rx_coalesce; |
1123 | u8 adapter_fcn_count; | 1142 | u8 adapter_fcn_count; |
1124 | u8 pft_mcast_map[NES_PFT_SIZE]; | 1143 | u8 pft_mcast_map[NES_PFT_SIZE]; |
@@ -1217,6 +1236,90 @@ struct nes_ib_device { | |||
1217 | u32 num_pd; | 1236 | u32 num_pd; |
1218 | }; | 1237 | }; |
1219 | 1238 | ||
1239 | enum nes_hdrct_flags { | ||
1240 | DDP_LEN_FLAG = 0x80, | ||
1241 | DDP_HDR_FLAG = 0x40, | ||
1242 | RDMA_HDR_FLAG = 0x20 | ||
1243 | }; | ||
1244 | |||
1245 | enum nes_term_layers { | ||
1246 | LAYER_RDMA = 0, | ||
1247 | LAYER_DDP = 1, | ||
1248 | LAYER_MPA = 2 | ||
1249 | }; | ||
1250 | |||
1251 | enum nes_term_error_types { | ||
1252 | RDMAP_CATASTROPHIC = 0, | ||
1253 | RDMAP_REMOTE_PROT = 1, | ||
1254 | RDMAP_REMOTE_OP = 2, | ||
1255 | DDP_CATASTROPHIC = 0, | ||
1256 | DDP_TAGGED_BUFFER = 1, | ||
1257 | DDP_UNTAGGED_BUFFER = 2, | ||
1258 | DDP_LLP = 3 | ||
1259 | }; | ||
1260 | |||
1261 | enum nes_term_rdma_errors { | ||
1262 | RDMAP_INV_STAG = 0x00, | ||
1263 | RDMAP_INV_BOUNDS = 0x01, | ||
1264 | RDMAP_ACCESS = 0x02, | ||
1265 | RDMAP_UNASSOC_STAG = 0x03, | ||
1266 | RDMAP_TO_WRAP = 0x04, | ||
1267 | RDMAP_INV_RDMAP_VER = 0x05, | ||
1268 | RDMAP_UNEXPECTED_OP = 0x06, | ||
1269 | RDMAP_CATASTROPHIC_LOCAL = 0x07, | ||
1270 | RDMAP_CATASTROPHIC_GLOBAL = 0x08, | ||
1271 | RDMAP_CANT_INV_STAG = 0x09, | ||
1272 | RDMAP_UNSPECIFIED = 0xff | ||
1273 | }; | ||
1274 | |||
1275 | enum nes_term_ddp_errors { | ||
1276 | DDP_CATASTROPHIC_LOCAL = 0x00, | ||
1277 | DDP_TAGGED_INV_STAG = 0x00, | ||
1278 | DDP_TAGGED_BOUNDS = 0x01, | ||
1279 | DDP_TAGGED_UNASSOC_STAG = 0x02, | ||
1280 | DDP_TAGGED_TO_WRAP = 0x03, | ||
1281 | DDP_TAGGED_INV_DDP_VER = 0x04, | ||
1282 | DDP_UNTAGGED_INV_QN = 0x01, | ||
1283 | DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02, | ||
1284 | DDP_UNTAGGED_INV_MSN_RANGE = 0x03, | ||
1285 | DDP_UNTAGGED_INV_MO = 0x04, | ||
1286 | DDP_UNTAGGED_INV_TOO_LONG = 0x05, | ||
1287 | DDP_UNTAGGED_INV_DDP_VER = 0x06 | ||
1288 | }; | ||
1289 | |||
1290 | enum nes_term_mpa_errors { | ||
1291 | MPA_CLOSED = 0x01, | ||
1292 | MPA_CRC = 0x02, | ||
1293 | MPA_MARKER = 0x03, | ||
1294 | MPA_REQ_RSP = 0x04, | ||
1295 | }; | ||
1296 | |||
1297 | struct nes_terminate_hdr { | ||
1298 | u8 layer_etype; | ||
1299 | u8 error_code; | ||
1300 | u8 hdrct; | ||
1301 | u8 rsvd; | ||
1302 | }; | ||
1303 | |||
1304 | /* Used to determine how to fill in terminate error codes */ | ||
1305 | #define IWARP_OPCODE_WRITE 0 | ||
1306 | #define IWARP_OPCODE_READREQ 1 | ||
1307 | #define IWARP_OPCODE_READRSP 2 | ||
1308 | #define IWARP_OPCODE_SEND 3 | ||
1309 | #define IWARP_OPCODE_SEND_INV 4 | ||
1310 | #define IWARP_OPCODE_SEND_SE 5 | ||
1311 | #define IWARP_OPCODE_SEND_SE_INV 6 | ||
1312 | #define IWARP_OPCODE_TERM 7 | ||
1313 | |||
1314 | /* These values are used only during terminate processing */ | ||
1315 | #define TERM_DDP_LEN_TAGGED 14 | ||
1316 | #define TERM_DDP_LEN_UNTAGGED 18 | ||
1317 | #define TERM_RDMA_LEN 28 | ||
1318 | #define RDMA_OPCODE_MASK 0x0f | ||
1319 | #define RDMA_READ_REQ_OPCODE 1 | ||
1320 | #define BAD_FRAME_OFFSET 64 | ||
1321 | #define CQE_MAJOR_DRV 0x8000 | ||
1322 | |||
1220 | #define nes_vlan_rx vlan_hwaccel_receive_skb | 1323 | #define nes_vlan_rx vlan_hwaccel_receive_skb |
1221 | #define nes_netif_rx netif_receive_skb | 1324 | #define nes_netif_rx netif_receive_skb |
1222 | 1325 | ||
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index a282031d15c7..9687c397ce1a 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada | |||
183 | } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { | 183 | } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { |
184 | nesadapter->virtwq = 1; | 184 | nesadapter->virtwq = 1; |
185 | } | 185 | } |
186 | if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3)) | ||
187 | nesadapter->send_term_ok = 1; | ||
188 | |||
186 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + | 189 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + |
187 | (u32)((u8)eeprom_data); | 190 | (u32)((u8)eeprom_data); |
188 | 191 | ||
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev) | |||
548 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | 551 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); |
549 | } | 552 | } |
550 | if (cqp_request == NULL) { | 553 | if (cqp_request == NULL) { |
551 | cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); | 554 | cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC); |
552 | if (cqp_request) { | 555 | if (cqp_request) { |
553 | cqp_request->dynamic = 1; | 556 | cqp_request->dynamic = 1; |
554 | INIT_LIST_HEAD(&cqp_request->list); | 557 | INIT_LIST_HEAD(&cqp_request->list); |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 21e0fd336cf7..a680c42d6e8c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop | |||
667 | */ | 667 | */ |
668 | static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) | 668 | static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) |
669 | { | 669 | { |
670 | struct nes_vnic *nesvnic = to_nesvnic(ibdev); | ||
671 | struct net_device *netdev = nesvnic->netdev; | ||
672 | |||
670 | memset(props, 0, sizeof(*props)); | 673 | memset(props, 0, sizeof(*props)); |
671 | 674 | ||
672 | props->max_mtu = IB_MTU_2048; | 675 | props->max_mtu = IB_MTU_4096; |
673 | props->active_mtu = IB_MTU_2048; | 676 | |
677 | if (netdev->mtu >= 4096) | ||
678 | props->active_mtu = IB_MTU_4096; | ||
679 | else if (netdev->mtu >= 2048) | ||
680 | props->active_mtu = IB_MTU_2048; | ||
681 | else if (netdev->mtu >= 1024) | ||
682 | props->active_mtu = IB_MTU_1024; | ||
683 | else if (netdev->mtu >= 512) | ||
684 | props->active_mtu = IB_MTU_512; | ||
685 | else | ||
686 | props->active_mtu = IB_MTU_256; | ||
687 | |||
674 | props->lid = 1; | 688 | props->lid = 1; |
675 | props->lmc = 0; | 689 | props->lmc = 0; |
676 | props->sm_lid = 0; | 690 | props->sm_lid = 0; |
677 | props->sm_sl = 0; | 691 | props->sm_sl = 0; |
678 | props->state = IB_PORT_ACTIVE; | 692 | if (nesvnic->linkup) |
693 | props->state = IB_PORT_ACTIVE; | ||
694 | else | ||
695 | props->state = IB_PORT_DOWN; | ||
679 | props->phys_state = 0; | 696 | props->phys_state = 0; |
680 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | | 697 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | |
681 | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; | 698 | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; |
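The nes_query_port() hunk above stops hard-coding IB_MTU_2048 and instead derives active_mtu from the current netdev MTU, rounding down to the largest InfiniBand MTU step the link can carry. A minimal user-space sketch of that rounding logic; the IB_MTU_* values here are stand-ins for the enum the driver uses from the RDMA headers.

#include <stdio.h>

/* Stand-ins for the ib_mtu enum values used by the driver. */
enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

/* Round a netdev MTU down to the largest IB MTU it can carry,
 * mirroring the if/else chain added to nes_query_port(). */
static enum ib_mtu mtu_to_ib_mtu(unsigned int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        if (mtu >= 2048)
                return IB_MTU_2048;
        if (mtu >= 1024)
                return IB_MTU_1024;
        if (mtu >= 512)
                return IB_MTU_512;
        return IB_MTU_256;
}

int main(void)
{
        printf("1500 -> IB_MTU_%d\n", 256 << (mtu_to_ib_mtu(1500) - 1));
        printf("9000 -> IB_MTU_%d\n", 256 << (mtu_to_ib_mtu(9000) - 1));
        return 0;
}

An MTU of 1500 maps to IB_MTU_1024 rather than 2048, which is why the fixed value had to go once larger and smaller netdev MTUs were possible.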
@@ -1506,12 +1523,45 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
1506 | 1523 | ||
1507 | 1524 | ||
1508 | /** | 1525 | /** |
1526 | * nes_clean_cq | ||
1527 | */ | ||
1528 | static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq) | ||
1529 | { | ||
1530 | u32 cq_head; | ||
1531 | u32 lo; | ||
1532 | u32 hi; | ||
1533 | u64 u64temp; | ||
1534 | unsigned long flags = 0; | ||
1535 | |||
1536 | spin_lock_irqsave(&nescq->lock, flags); | ||
1537 | |||
1538 | cq_head = nescq->hw_cq.cq_head; | ||
1539 | while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) { | ||
1540 | rmb(); | ||
1541 | lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); | ||
1542 | hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]); | ||
1543 | u64temp = (((u64)hi) << 32) | ((u64)lo); | ||
1544 | u64temp &= ~(NES_SW_CONTEXT_ALIGN-1); | ||
1545 | if (u64temp == (u64)(unsigned long)nesqp) { | ||
1546 | /* Zero the context value so cqe will be ignored */ | ||
1547 | nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0; | ||
1548 | nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0; | ||
1549 | } | ||
1550 | |||
1551 | if (++cq_head >= nescq->hw_cq.cq_size) | ||
1552 | cq_head = 0; | ||
1553 | } | ||
1554 | |||
1555 | spin_unlock_irqrestore(&nescq->lock, flags); | ||
1556 | } | ||
1557 | |||
1558 | |||
1559 | /** | ||
1509 | * nes_destroy_qp | 1560 | * nes_destroy_qp |
1510 | */ | 1561 | */ |
1511 | static int nes_destroy_qp(struct ib_qp *ibqp) | 1562 | static int nes_destroy_qp(struct ib_qp *ibqp) |
1512 | { | 1563 | { |
1513 | struct nes_qp *nesqp = to_nesqp(ibqp); | 1564 | struct nes_qp *nesqp = to_nesqp(ibqp); |
1514 | /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */ | ||
1515 | struct nes_ucontext *nes_ucontext; | 1565 | struct nes_ucontext *nes_ucontext; |
1516 | struct ib_qp_attr attr; | 1566 | struct ib_qp_attr attr; |
1517 | struct iw_cm_id *cm_id; | 1567 | struct iw_cm_id *cm_id; |
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp) | |||
1548 | nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); | 1598 | nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); |
1549 | } | 1599 | } |
1550 | 1600 | ||
1551 | |||
1552 | if (nesqp->user_mode) { | 1601 | if (nesqp->user_mode) { |
1553 | if ((ibqp->uobject)&&(ibqp->uobject->context)) { | 1602 | if ((ibqp->uobject)&&(ibqp->uobject->context)) { |
1554 | nes_ucontext = to_nesucontext(ibqp->uobject->context); | 1603 | nes_ucontext = to_nesucontext(ibqp->uobject->context); |
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp) | |||
1560 | } | 1609 | } |
1561 | if (nesqp->pbl_pbase) | 1610 | if (nesqp->pbl_pbase) |
1562 | kunmap(nesqp->page); | 1611 | kunmap(nesqp->page); |
1612 | } else { | ||
1613 | /* Clean any pending completions from the cq(s) */ | ||
1614 | if (nesqp->nesscq) | ||
1615 | nes_clean_cq(nesqp, nesqp->nesscq); | ||
1616 | |||
1617 | if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq)) | ||
1618 | nes_clean_cq(nesqp, nesqp->nesrcq); | ||
1563 | } | 1619 | } |
1564 | 1620 | ||
1565 | nes_rem_ref(&nesqp->ibqp); | 1621 | nes_rem_ref(&nesqp->ibqp); |
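The nes_clean_cq() helper added above walks the completion ring and zeroes the completion context of any still-valid CQE that points at the QP being destroyed, so later polls skip those entries. A simplified, host-endian sketch of that scan; the real code also takes the CQ spinlock and reads the words through little-endian accessors.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CQ_SIZE   8
#define CQE_VALID (1u << 31)

/* Reduced CQE: a valid flag and a 64-bit completion context (QP pointer). */
struct cqe {
        uint32_t opcode;        /* carries the valid bit */
        uint64_t ctx;           /* completion context, aligned QP pointer */
};

/* Zero the context of every valid entry that belongs to 'qp', starting at
 * 'head', stopping at the first non-valid entry and wrapping at CQ_SIZE. */
static void clean_cq(struct cqe *ring, uint32_t head, const void *qp)
{
        while (ring[head].opcode & CQE_VALID) {
                if (ring[head].ctx == (uint64_t)(uintptr_t)qp)
                        ring[head].ctx = 0;     /* poll code will ignore it */
                if (++head >= CQ_SIZE)
                        head = 0;
        }
}

int main(void)
{
        struct cqe ring[CQ_SIZE];
        int dummy_qp;

        memset(ring, 0, sizeof(ring));
        ring[0].opcode = CQE_VALID;
        ring[0].ctx = (uint64_t)(uintptr_t)&dummy_qp;
        ring[1].opcode = CQE_VALID;
        ring[1].ctx = 0x1234;

        clean_cq(ring, 0, &dummy_qp);
        printf("entry 0 ctx=%llu, entry 1 ctx=0x%llx\n",
               (unsigned long long)ring[0].ctx,
               (unsigned long long)ring[1].ctx);
        return 0;
}

Only the entry owned by the destroyed QP is cleared; completions for other QPs on the shared CQ are left untouched.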
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
2884 | * nes_hw_modify_qp | 2940 | * nes_hw_modify_qp |
2885 | */ | 2941 | */ |
2886 | int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, | 2942 | int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, |
2887 | u32 next_iwarp_state, u32 wait_completion) | 2943 | u32 next_iwarp_state, u32 termlen, u32 wait_completion) |
2888 | { | 2944 | { |
2889 | struct nes_hw_cqp_wqe *cqp_wqe; | 2945 | struct nes_hw_cqp_wqe *cqp_wqe; |
2890 | /* struct iw_cm_id *cm_id = nesqp->cm_id; */ | 2946 | /* struct iw_cm_id *cm_id = nesqp->cm_id; */ |
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, | |||
2916 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); | 2972 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); |
2917 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); | 2973 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); |
2918 | 2974 | ||
2975 | /* If sending a terminate message, fill in the length (in words) */ | ||
2976 | if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) && | ||
2977 | !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) { | ||
2978 | termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT; | ||
2979 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen); | ||
2980 | } | ||
2981 | |||
2919 | atomic_set(&cqp_request->refcount, 2); | 2982 | atomic_set(&cqp_request->refcount, 2); |
2920 | nes_post_cqp_request(nesdev, cqp_request); | 2983 | nes_post_cqp_request(nesdev, cqp_request); |
2921 | 2984 | ||
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
3086 | } | 3149 | } |
3087 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", | 3150 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", |
3088 | nesqp->hwqp.qp_id); | 3151 | nesqp->hwqp.qp_id); |
3152 | if (nesqp->term_flags) | ||
3153 | del_timer(&nesqp->terminate_timer); | ||
3154 | |||
3089 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; | 3155 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; |
3090 | /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ | 3156 | /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ |
3091 | if (nesqp->hte_added) { | 3157 | if (nesqp->hte_added) { |
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
3163 | 3229 | ||
3164 | if (issue_modify_qp) { | 3230 | if (issue_modify_qp) { |
3165 | nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); | 3231 | nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); |
3166 | ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1); | 3232 | ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1); |
3167 | if (ret) | 3233 | if (ret) |
3168 | nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" | 3234 | nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" |
3169 | " failed for QP%u.\n", | 3235 | " failed for QP%u.\n", |
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3328 | head = nesqp->hwqp.sq_head; | 3394 | head = nesqp->hwqp.sq_head; |
3329 | 3395 | ||
3330 | while (ib_wr) { | 3396 | while (ib_wr) { |
3397 | /* Check for QP error */ | ||
3398 | if (nesqp->term_flags) { | ||
3399 | err = -EINVAL; | ||
3400 | break; | ||
3401 | } | ||
3402 | |||
3331 | /* Check for SQ overflow */ | 3403 | /* Check for SQ overflow */ |
3332 | if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { | 3404 | if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { |
3333 | err = -EINVAL; | 3405 | err = -EINVAL; |
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, | |||
3484 | head = nesqp->hwqp.rq_head; | 3556 | head = nesqp->hwqp.rq_head; |
3485 | 3557 | ||
3486 | while (ib_wr) { | 3558 | while (ib_wr) { |
3559 | /* Check for QP error */ | ||
3560 | if (nesqp->term_flags) { | ||
3561 | err = -EINVAL; | ||
3562 | break; | ||
3563 | } | ||
3564 | |||
3487 | if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { | 3565 | if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { |
3488 | err = -EINVAL; | 3566 | err = -EINVAL; |
3489 | break; | 3567 | break; |
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3547 | { | 3625 | { |
3548 | u64 u64temp; | 3626 | u64 u64temp; |
3549 | u64 wrid; | 3627 | u64 wrid; |
3550 | /* u64 u64temp; */ | ||
3551 | unsigned long flags = 0; | 3628 | unsigned long flags = 0; |
3552 | struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); | 3629 | struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); |
3553 | struct nes_device *nesdev = nesvnic->nesdev; | 3630 | struct nes_device *nesdev = nesvnic->nesdev; |
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3555 | struct nes_qp *nesqp; | 3632 | struct nes_qp *nesqp; |
3556 | struct nes_hw_cqe cqe; | 3633 | struct nes_hw_cqe cqe; |
3557 | u32 head; | 3634 | u32 head; |
3558 | u32 wq_tail; | 3635 | u32 wq_tail = 0; |
3559 | u32 cq_size; | 3636 | u32 cq_size; |
3560 | u32 cqe_count = 0; | 3637 | u32 cqe_count = 0; |
3561 | u32 wqe_index; | 3638 | u32 wqe_index; |
3562 | u32 u32temp; | 3639 | u32 u32temp; |
3563 | /* u32 counter; */ | 3640 | u32 move_cq_head = 1; |
3641 | u32 err_code; | ||
3564 | 3642 | ||
3565 | nes_debug(NES_DBG_CQ, "\n"); | 3643 | nes_debug(NES_DBG_CQ, "\n"); |
3566 | 3644 | ||
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3570 | cq_size = nescq->hw_cq.cq_size; | 3648 | cq_size = nescq->hw_cq.cq_size; |
3571 | 3649 | ||
3572 | while (cqe_count < num_entries) { | 3650 | while (cqe_count < num_entries) { |
3573 | if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & | 3651 | if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & |
3574 | NES_CQE_VALID) { | 3652 | NES_CQE_VALID) == 0) |
3575 | /* | 3653 | break; |
3576 | * Make sure we read CQ entry contents *after* | 3654 | |
3577 | * we've checked the valid bit. | 3655 | /* |
3578 | */ | 3656 | * Make sure we read CQ entry contents *after* |
3579 | rmb(); | 3657 | * we've checked the valid bit. |
3580 | 3658 | */ | |
3581 | cqe = nescq->hw_cq.cq_vbase[head]; | 3659 | rmb(); |
3582 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; | 3660 | |
3583 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); | 3661 | cqe = nescq->hw_cq.cq_vbase[head]; |
3584 | wqe_index = u32temp & | 3662 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); |
3585 | (nesdev->nesadapter->max_qp_wr - 1); | 3663 | wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1); |
3586 | u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); | 3664 | u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); |
3587 | /* parse CQE, get completion context from WQE (either rq or sq */ | 3665 | /* parse CQE, get completion context from WQE (either rq or sq) */ |
3588 | u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | | 3666 | u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | |
3589 | ((u64)u32temp); | 3667 | ((u64)u32temp); |
3590 | nesqp = *((struct nes_qp **)&u64temp); | 3668 | |
3669 | if (u64temp) { | ||
3670 | nesqp = (struct nes_qp *)(unsigned long)u64temp; | ||
3591 | memset(entry, 0, sizeof *entry); | 3671 | memset(entry, 0, sizeof *entry); |
3592 | if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { | 3672 | if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { |
3593 | entry->status = IB_WC_SUCCESS; | 3673 | entry->status = IB_WC_SUCCESS; |
3594 | } else { | 3674 | } else { |
3595 | entry->status = IB_WC_WR_FLUSH_ERR; | 3675 | err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]); |
3676 | if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) { | ||
3677 | entry->status = err_code & 0x0000ffff; | ||
3678 | |||
3679 | /* The rest of the cqe's will be marked as flushed */ | ||
3680 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] = | ||
3681 | cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) | | ||
3682 | NES_IWARP_CQE_MINOR_FLUSH); | ||
3683 | } else | ||
3684 | entry->status = IB_WC_WR_FLUSH_ERR; | ||
3596 | } | 3685 | } |
3597 | 3686 | ||
3598 | entry->qp = &nesqp->ibqp; | 3687 | entry->qp = &nesqp->ibqp; |
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3601 | if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { | 3690 | if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { |
3602 | if (nesqp->skip_lsmm) { | 3691 | if (nesqp->skip_lsmm) { |
3603 | nesqp->skip_lsmm = 0; | 3692 | nesqp->skip_lsmm = 0; |
3604 | wq_tail = nesqp->hwqp.sq_tail++; | 3693 | nesqp->hwqp.sq_tail++; |
3605 | } | 3694 | } |
3606 | 3695 | ||
3607 | /* Working on a SQ Completion*/ | 3696 | /* Working on a SQ Completion*/ |
3608 | wq_tail = wqe_index; | 3697 | wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index]. |
3609 | nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1); | ||
3610 | wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. | ||
3611 | wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | | 3698 | wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | |
3612 | ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. | 3699 | ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index]. |
3613 | wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); | 3700 | wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); |
3614 | entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. | 3701 | entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index]. |
3615 | wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); | 3702 | wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); |
3616 | 3703 | ||
3617 | switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. | 3704 | switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index]. |
3618 | wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { | 3705 | wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { |
3619 | case NES_IWARP_SQ_OP_RDMAW: | 3706 | case NES_IWARP_SQ_OP_RDMAW: |
3620 | nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); | 3707 | nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); |
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3623 | case NES_IWARP_SQ_OP_RDMAR: | 3710 | case NES_IWARP_SQ_OP_RDMAR: |
3624 | nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); | 3711 | nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); |
3625 | entry->opcode = IB_WC_RDMA_READ; | 3712 | entry->opcode = IB_WC_RDMA_READ; |
3626 | entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. | 3713 | entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index]. |
3627 | wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); | 3714 | wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); |
3628 | break; | 3715 | break; |
3629 | case NES_IWARP_SQ_OP_SENDINV: | 3716 | case NES_IWARP_SQ_OP_SENDINV: |
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3634 | entry->opcode = IB_WC_SEND; | 3721 | entry->opcode = IB_WC_SEND; |
3635 | break; | 3722 | break; |
3636 | } | 3723 | } |
3724 | |||
3725 | nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1); | ||
3726 | if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) { | ||
3727 | move_cq_head = 0; | ||
3728 | wq_tail = nesqp->hwqp.sq_tail; | ||
3729 | } | ||
3637 | } else { | 3730 | } else { |
3638 | /* Working on a RQ Completion*/ | 3731 | /* Working on a RQ Completion*/ |
3639 | wq_tail = wqe_index; | ||
3640 | nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1); | ||
3641 | entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); | 3732 | entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); |
3642 | wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | | 3733 | wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | |
3643 | ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); | 3734 | ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); |
3644 | entry->opcode = IB_WC_RECV; | 3735 | entry->opcode = IB_WC_RECV; |
3736 | |||
3737 | nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1); | ||
3738 | if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) { | ||
3739 | move_cq_head = 0; | ||
3740 | wq_tail = nesqp->hwqp.rq_tail; | ||
3741 | } | ||
3645 | } | 3742 | } |
3743 | |||
3646 | entry->wr_id = wrid; | 3744 | entry->wr_id = wrid; |
3745 | entry++; | ||
3746 | cqe_count++; | ||
3747 | } | ||
3647 | 3748 | ||
3749 | if (move_cq_head) { | ||
3750 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; | ||
3648 | if (++head >= cq_size) | 3751 | if (++head >= cq_size) |
3649 | head = 0; | 3752 | head = 0; |
3650 | cqe_count++; | ||
3651 | nescq->polled_completions++; | 3753 | nescq->polled_completions++; |
3754 | |||
3652 | if ((nescq->polled_completions > (cq_size / 2)) || | 3755 | if ((nescq->polled_completions > (cq_size / 2)) || |
3653 | (nescq->polled_completions == 255)) { | 3756 | (nescq->polled_completions == 255)) { |
3654 | nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" | 3757 | nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" |
3655 | " are pending %u of %u.\n", | 3758 | " are pending %u of %u.\n", |
3656 | nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); | 3759 | nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); |
3657 | nes_write32(nesdev->regs+NES_CQE_ALLOC, | 3760 | nes_write32(nesdev->regs+NES_CQE_ALLOC, |
3658 | nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); | 3761 | nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); |
3659 | nescq->polled_completions = 0; | 3762 | nescq->polled_completions = 0; |
3660 | } | 3763 | } |
3661 | entry++; | 3764 | } else { |
3662 | } else | 3765 | /* Update the wqe index and set status to flush */ |
3663 | break; | 3766 | wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); |
3767 | wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail; | ||
3768 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = | ||
3769 | cpu_to_le32(wqe_index); | ||
3770 | move_cq_head = 1; /* ready for next pass */ | ||
3771 | } | ||
3664 | } | 3772 | } |
3665 | 3773 | ||
3666 | if (nescq->polled_completions) { | 3774 | if (nescq->polled_completions) { |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index 41c07f29f7c9..89822d75f82e 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h | |||
@@ -40,6 +40,10 @@ struct nes_device; | |||
40 | #define NES_MAX_USER_DB_REGIONS 4096 | 40 | #define NES_MAX_USER_DB_REGIONS 4096 |
41 | #define NES_MAX_USER_WQ_REGIONS 4096 | 41 | #define NES_MAX_USER_WQ_REGIONS 4096 |
42 | 42 | ||
43 | #define NES_TERM_SENT 0x01 | ||
44 | #define NES_TERM_RCVD 0x02 | ||
45 | #define NES_TERM_DONE 0x04 | ||
46 | |||
43 | struct nes_ucontext { | 47 | struct nes_ucontext { |
44 | struct ib_ucontext ibucontext; | 48 | struct ib_ucontext ibucontext; |
45 | struct nes_device *nesdev; | 49 | struct nes_device *nesdev; |
@@ -119,6 +123,11 @@ struct nes_wq { | |||
119 | spinlock_t lock; | 123 | spinlock_t lock; |
120 | }; | 124 | }; |
121 | 125 | ||
126 | struct disconn_work { | ||
127 | struct work_struct work; | ||
128 | struct nes_qp *nesqp; | ||
129 | }; | ||
130 | |||
122 | struct iw_cm_id; | 131 | struct iw_cm_id; |
123 | struct ietf_mpa_frame; | 132 | struct ietf_mpa_frame; |
124 | 133 | ||
@@ -127,7 +136,6 @@ struct nes_qp { | |||
127 | void *allocated_buffer; | 136 | void *allocated_buffer; |
128 | struct iw_cm_id *cm_id; | 137 | struct iw_cm_id *cm_id; |
129 | struct workqueue_struct *wq; | 138 | struct workqueue_struct *wq; |
130 | struct work_struct disconn_work; | ||
131 | struct nes_cq *nesscq; | 139 | struct nes_cq *nesscq; |
132 | struct nes_cq *nesrcq; | 140 | struct nes_cq *nesrcq; |
133 | struct nes_pd *nespd; | 141 | struct nes_pd *nespd; |
@@ -155,9 +163,13 @@ struct nes_qp { | |||
155 | void *pbl_vbase; | 163 | void *pbl_vbase; |
156 | dma_addr_t pbl_pbase; | 164 | dma_addr_t pbl_pbase; |
157 | struct page *page; | 165 | struct page *page; |
166 | struct timer_list terminate_timer; | ||
167 | enum ib_event_type terminate_eventtype; | ||
158 | wait_queue_head_t kick_waitq; | 168 | wait_queue_head_t kick_waitq; |
159 | u16 in_disconnect; | 169 | u16 in_disconnect; |
160 | u16 private_data_len; | 170 | u16 private_data_len; |
171 | u16 term_sq_flush_code; | ||
172 | u16 term_rq_flush_code; | ||
161 | u8 active_conn; | 173 | u8 active_conn; |
162 | u8 skip_lsmm; | 174 | u8 skip_lsmm; |
163 | u8 user_mode; | 175 | u8 user_mode; |
@@ -165,7 +177,7 @@ struct nes_qp { | |||
165 | u8 hw_iwarp_state; | 177 | u8 hw_iwarp_state; |
166 | u8 flush_issued; | 178 | u8 flush_issued; |
167 | u8 hw_tcp_state; | 179 | u8 hw_tcp_state; |
168 | u8 disconn_pending; | 180 | u8 term_flags; |
169 | u8 destroyed; | 181 | u8 destroyed; |
170 | }; | 182 | }; |
171 | #endif /* NES_VERBS_H */ | 183 | #endif /* NES_VERBS_H */ |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 986f07fb3ec4..30bdf427ee6d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -31,7 +31,6 @@ | |||
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <rdma/ib_cm.h> | 33 | #include <rdma/ib_cm.h> |
34 | #include <rdma/ib_cache.h> | ||
35 | #include <net/dst.h> | 34 | #include <net/dst.h> |
36 | #include <net/icmp.h> | 35 | #include <net/icmp.h> |
37 | #include <linux/icmpv6.h> | 36 | #include <linux/icmpv6.h> |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index c9dcb2064f20..8c91d9f37ada 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/dma-mapping.h> | 37 | #include <linux/dma-mapping.h> |
38 | 38 | ||
39 | #include <rdma/ib_cache.h> | ||
40 | #include <linux/ip.h> | 39 | #include <linux/ip.h> |
41 | #include <linux/tcp.h> | 40 | #include <linux/tcp.h> |
42 | 41 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index e319d91f60a6..2bf5116deec4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) | |||
604 | skb_queue_len(&neigh->queue)); | 604 | skb_queue_len(&neigh->queue)); |
605 | goto err_drop; | 605 | goto err_drop; |
606 | } | 606 | } |
607 | } else | 607 | } else { |
608 | spin_unlock_irqrestore(&priv->lock, flags); | ||
608 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); | 609 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); |
610 | return; | ||
611 | } | ||
609 | } else { | 612 | } else { |
610 | neigh->ah = NULL; | 613 | neigh->ah = NULL; |
611 | 614 | ||
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | |||
688 | ipoib_dbg(priv, "Send unicast ARP to %04x\n", | 691 | ipoib_dbg(priv, "Send unicast ARP to %04x\n", |
689 | be16_to_cpu(path->pathrec.dlid)); | 692 | be16_to_cpu(path->pathrec.dlid)); |
690 | 693 | ||
694 | spin_unlock_irqrestore(&priv->lock, flags); | ||
691 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); | 695 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); |
696 | return; | ||
692 | } else if ((path->query || !path_rec_start(dev, path)) && | 697 | } else if ((path->query || !path_rec_start(dev, path)) && |
693 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 698 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
694 | /* put pseudoheader back on for next time */ | 699 | /* put pseudoheader back on for next time */ |
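Both ipoib_main.c hunks above release priv->lock before calling ipoib_send() and return immediately, instead of transmitting with the spinlock held. A user-space analogue of the pattern, with a pthread mutex standing in for the spinlock and blocking_send() as a placeholder for the potentially long-running transmit path.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for a transmit routine that must not run under the lock. */
static void blocking_send(const char *pkt)
{
        printf("sending %s (lock not held)\n", pkt);
}

/* Inspect state under the lock, but drop it before transmitting,
 * mirroring the spin_unlock_irqrestore()-before-ipoib_send() change. */
static void queue_or_send(const char *pkt, int path_ready)
{
        pthread_mutex_lock(&lock);

        if (!path_ready) {
                /* would queue the packet here, still under the lock */
                pthread_mutex_unlock(&lock);
                return;
        }

        pthread_mutex_unlock(&lock);
        blocking_send(pkt);     /* runs outside the critical section */
}

int main(void)
{
        queue_or_send("arp-reply", 1);
        queue_or_send("deferred", 0);
        return 0;
}

The early return after the send is what keeps the original unlock at the bottom of the function from running a second time.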
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index a0e97532e714..25874fc680c9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -720,7 +720,9 @@ out: | |||
720 | } | 720 | } |
721 | } | 721 | } |
722 | 722 | ||
723 | spin_unlock_irqrestore(&priv->lock, flags); | ||
723 | ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); | 724 | ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); |
725 | return; | ||
724 | } | 726 | } |
725 | 727 | ||
726 | unlock: | 728 | unlock: |
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
758 | } | 760 | } |
759 | } | 761 | } |
760 | 762 | ||
763 | static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen, | ||
764 | const u8 *broadcast) | ||
765 | { | ||
766 | if (addrlen != INFINIBAND_ALEN) | ||
767 | return 0; | ||
768 | /* reserved QPN, prefix, scope */ | ||
769 | if (memcmp(addr, broadcast, 6)) | ||
770 | return 0; | ||
771 | /* signature lower, pkey */ | ||
772 | if (memcmp(addr + 7, broadcast + 7, 3)) | ||
773 | return 0; | ||
774 | return 1; | ||
775 | } | ||
776 | |||
761 | void ipoib_mcast_restart_task(struct work_struct *work) | 777 | void ipoib_mcast_restart_task(struct work_struct *work) |
762 | { | 778 | { |
763 | struct ipoib_dev_priv *priv = | 779 | struct ipoib_dev_priv *priv = |
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work) | |||
791 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { | 807 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { |
792 | union ib_gid mgid; | 808 | union ib_gid mgid; |
793 | 809 | ||
810 | if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, | ||
811 | mclist->dmi_addrlen, | ||
812 | dev->broadcast)) | ||
813 | continue; | ||
814 | |||
794 | memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); | 815 | memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); |
795 | 816 | ||
796 | mcast = __ipoib_mcast_find(dev, &mgid); | 817 | mcast = __ipoib_mcast_find(dev, &mgid); |
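ipoib_mcast_restart_task() now skips hardware multicast entries that do not look like IPoIB multicast addresses. The new ipoib_mcast_addr_is_valid() compares bytes 0-5 (reserved QPN, prefix, scope) and bytes 7-9 (signature low byte plus P_Key) against the device broadcast address. A standalone sketch of the same byte-range comparison; the 20-byte address length is the usual IPoIB hardware address length, taken here as an assumption.

#include <stdio.h>
#include <string.h>

#define INFINIBAND_ALEN 20      /* IPoIB hardware address length (assumed) */

/* Return 1 if 'addr' matches 'broadcast' in the byte ranges that identify
 * an IPoIB multicast mapping (0..5 and 7..9), 0 otherwise. */
static int mcast_addr_is_valid(const unsigned char *addr, unsigned int addrlen,
                               const unsigned char *broadcast)
{
        if (addrlen != INFINIBAND_ALEN)
                return 0;
        if (memcmp(addr, broadcast, 6))         /* reserved QPN, prefix, scope */
                return 0;
        if (memcmp(addr + 7, broadcast + 7, 3)) /* signature lower, pkey */
                return 0;
        return 1;
}

int main(void)
{
        unsigned char broadcast[INFINIBAND_ALEN] = { 0 };
        unsigned char good[INFINIBAND_ALEN] = { 0 };
        unsigned char bad[INFINIBAND_ALEN] = { 0 };

        good[6] = 0xff;         /* byte 6 is not compared, so still valid */
        bad[8] = 0x01;          /* pkey byte differs, so rejected */

        printf("good: %d, bad: %d\n",
               mcast_addr_is_valid(good, INFINIBAND_ALEN, broadcast),
               mcast_addr_is_valid(bad, INFINIBAND_ALEN, broadcast));
        return 0;
}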
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 4cfd084fa897..9a1d55b74d7a 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c | |||
@@ -456,8 +456,11 @@ static int joydev_ioctl_common(struct joydev *joydev, | |||
456 | unsigned int cmd, void __user *argp) | 456 | unsigned int cmd, void __user *argp) |
457 | { | 457 | { |
458 | struct input_dev *dev = joydev->handle.dev; | 458 | struct input_dev *dev = joydev->handle.dev; |
459 | size_t len; | ||
459 | int i, j; | 460 | int i, j; |
461 | const char *name; | ||
460 | 462 | ||
463 | /* Process fixed-sized commands. */ | ||
461 | switch (cmd) { | 464 | switch (cmd) { |
462 | 465 | ||
463 | case JS_SET_CAL: | 466 | case JS_SET_CAL: |
@@ -499,9 +502,22 @@ static int joydev_ioctl_common(struct joydev *joydev, | |||
499 | return copy_to_user(argp, joydev->corr, | 502 | return copy_to_user(argp, joydev->corr, |
500 | sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; | 503 | sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; |
501 | 504 | ||
502 | case JSIOCSAXMAP: | 505 | } |
503 | if (copy_from_user(joydev->abspam, argp, | 506 | |
504 | sizeof(__u8) * (ABS_MAX + 1))) | 507 | /* |
508 | * Process variable-sized commands (the axis and button map commands | ||
509 | * are considered variable-sized to decouple them from the values of | ||
510 | * ABS_MAX and KEY_MAX). | ||
511 | */ | ||
512 | switch (cmd & ~IOCSIZE_MASK) { | ||
513 | |||
514 | case (JSIOCSAXMAP & ~IOCSIZE_MASK): | ||
515 | len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam)); | ||
516 | /* | ||
517 | * FIXME: we should not copy into our axis map before | ||
518 | * validating the data. | ||
519 | */ | ||
520 | if (copy_from_user(joydev->abspam, argp, len)) | ||
505 | return -EFAULT; | 521 | return -EFAULT; |
506 | 522 | ||
507 | for (i = 0; i < joydev->nabs; i++) { | 523 | for (i = 0; i < joydev->nabs; i++) { |
@@ -511,13 +527,17 @@ static int joydev_ioctl_common(struct joydev *joydev, | |||
511 | } | 527 | } |
512 | return 0; | 528 | return 0; |
513 | 529 | ||
514 | case JSIOCGAXMAP: | 530 | case (JSIOCGAXMAP & ~IOCSIZE_MASK): |
515 | return copy_to_user(argp, joydev->abspam, | 531 | len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam)); |
516 | sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0; | 532 | return copy_to_user(argp, joydev->abspam, len) ? -EFAULT : 0; |
517 | 533 | ||
518 | case JSIOCSBTNMAP: | 534 | case (JSIOCSBTNMAP & ~IOCSIZE_MASK): |
519 | if (copy_from_user(joydev->keypam, argp, | 535 | len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam)); |
520 | sizeof(__u16) * (KEY_MAX - BTN_MISC + 1))) | 536 | /* |
537 | * FIXME: we should not copy into our keymap before | ||
538 | * validating the data. | ||
539 | */ | ||
540 | if (copy_from_user(joydev->keypam, argp, len)) | ||
521 | return -EFAULT; | 541 | return -EFAULT; |
522 | 542 | ||
523 | for (i = 0; i < joydev->nkey; i++) { | 543 | for (i = 0; i < joydev->nkey; i++) { |
@@ -529,25 +549,19 @@ static int joydev_ioctl_common(struct joydev *joydev, | |||
529 | 549 | ||
530 | return 0; | 550 | return 0; |
531 | 551 | ||
532 | case JSIOCGBTNMAP: | 552 | case (JSIOCGBTNMAP & ~IOCSIZE_MASK): |
533 | return copy_to_user(argp, joydev->keypam, | 553 | len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam)); |
534 | sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0; | 554 | return copy_to_user(argp, joydev->keypam, len) ? -EFAULT : 0; |
535 | 555 | ||
536 | default: | 556 | case JSIOCGNAME(0): |
537 | if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) { | 557 | name = dev->name; |
538 | int len; | 558 | if (!name) |
539 | const char *name = dev->name; | 559 | return 0; |
540 | 560 | ||
541 | if (!name) | 561 | len = min_t(size_t, _IOC_SIZE(cmd), strlen(name) + 1); |
542 | return 0; | 562 | return copy_to_user(argp, name, len) ? -EFAULT : len; |
543 | len = strlen(name) + 1; | ||
544 | if (len > _IOC_SIZE(cmd)) | ||
545 | len = _IOC_SIZE(cmd); | ||
546 | if (copy_to_user(argp, name, len)) | ||
547 | return -EFAULT; | ||
548 | return len; | ||
549 | } | ||
550 | } | 563 | } |
564 | |||
551 | return -EINVAL; | 565 | return -EINVAL; |
552 | } | 566 | } |
553 | 567 | ||
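The joydev change above matches the axis/button map ioctls by command number with the size bits masked off, then clamps the user copy to the smaller of the size encoded in the command and the driver's own map, so binaries built against older or newer ABS_MAX/KEY_MAX values keep working. A reduced sketch of that clamping; the 14-bit size field at bit 16 follows the usual Linux _IOC encoding and is treated as an assumption here.

#include <stdio.h>
#include <string.h>

/* Assumed layout of the size field inside an ioctl command number:
 * bits 16..29 carry the payload size (the usual Linux _IOC encoding). */
#define IOC_SIZESHIFT 16
#define IOC_SIZEBITS  14
#define IOC_SIZE(cmd) (((cmd) >> IOC_SIZESHIFT) & ((1u << IOC_SIZEBITS) - 1))

static unsigned char abspam[64];        /* stand-in for joydev->abspam */

/* Copy at most sizeof(abspam) bytes, even if the caller's command
 * encodes a larger buffer (newer headers with a bigger ABS_MAX). */
static size_t copy_axis_map(unsigned int cmd, const unsigned char *user_buf)
{
        size_t len = IOC_SIZE(cmd);

        if (len > sizeof(abspam))
                len = sizeof(abspam);
        memcpy(abspam, user_buf, len);
        return len;
}

int main(void)
{
        unsigned char user_buf[128] = { 0 };
        unsigned int cmd = 128u << IOC_SIZESHIFT;       /* caller claims 128 bytes */

        printf("copied %zu bytes\n", copy_axis_map(cmd, user_buf));
        return 0;
}

The same min_t() clamp is applied in both directions (get and set), which is what lets JSIOCGNAME keep its existing behaviour while moving into the size-masked switch.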
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index baabf8302645..f6c688cae334 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c | |||
@@ -74,6 +74,7 @@ static struct iforce_device iforce_device[] = { | |||
74 | { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce }, | 74 | { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce }, |
75 | { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //? | 75 | { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //? |
76 | { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? | 76 | { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? |
77 | { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, | ||
77 | { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? | 78 | { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? |
78 | { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? | 79 | { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? |
79 | { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? | 80 | { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? |
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index f83185aeb511..9f289d8f52c6 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c | |||
@@ -223,6 +223,7 @@ static struct usb_device_id iforce_usb_ids [] = { | |||
223 | { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */ | 223 | { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */ |
224 | { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */ | 224 | { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */ |
225 | { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ | 225 | { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ |
226 | { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ | ||
226 | { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ | 227 | { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ |
227 | { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ | 228 | { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ |
228 | { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ | 229 | { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 95fe0452dae4..6c6a09b1c0fe 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
@@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = { | |||
880 | }; | 880 | }; |
881 | 881 | ||
882 | /* | 882 | /* |
883 | * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate | ||
884 | * release for their volume buttons | ||
885 | */ | ||
886 | static unsigned int atkbd_hp_r4000_forced_release_keys[] = { | ||
887 | 0xae, 0xb0, -1U | ||
888 | }; | ||
889 | |||
890 | /* | ||
883 | * Samsung NC10,NC20 with Fn+F? key release not working | 891 | * Samsung NC10,NC20 with Fn+F? key release not working |
884 | */ | 892 | */ |
885 | static unsigned int atkbd_samsung_forced_release_keys[] = { | 893 | static unsigned int atkbd_samsung_forced_release_keys[] = { |
@@ -1537,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1537 | .driver_data = atkbd_hp_zv6100_forced_release_keys, | 1545 | .driver_data = atkbd_hp_zv6100_forced_release_keys, |
1538 | }, | 1546 | }, |
1539 | { | 1547 | { |
1548 | .ident = "HP Presario R4000", | ||
1549 | .matches = { | ||
1550 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
1551 | DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"), | ||
1552 | }, | ||
1553 | .callback = atkbd_setup_forced_release, | ||
1554 | .driver_data = atkbd_hp_r4000_forced_release_keys, | ||
1555 | }, | ||
1556 | { | ||
1557 | .ident = "HP Presario R4100", | ||
1558 | .matches = { | ||
1559 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
1560 | DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"), | ||
1561 | }, | ||
1562 | .callback = atkbd_setup_forced_release, | ||
1563 | .driver_data = atkbd_hp_r4000_forced_release_keys, | ||
1564 | }, | ||
1565 | { | ||
1566 | .ident = "HP Presario R4200", | ||
1567 | .matches = { | ||
1568 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
1569 | DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"), | ||
1570 | }, | ||
1571 | .callback = atkbd_setup_forced_release, | ||
1572 | .driver_data = atkbd_hp_r4000_forced_release_keys, | ||
1573 | }, | ||
1574 | { | ||
1540 | .ident = "Inventec Symphony", | 1575 | .ident = "Inventec Symphony", |
1541 | .matches = { | 1576 | .matches = { |
1542 | DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), | 1577 | DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index ae04d8a494e5..ccbf23ece8e3 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { | |||
382 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), | 382 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), |
383 | }, | 383 | }, |
384 | }, | 384 | }, |
385 | { | ||
386 | .ident = "Acer Aspire 5536", | ||
387 | .matches = { | ||
388 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
389 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), | ||
390 | DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), | ||
391 | }, | ||
392 | }, | ||
385 | { } | 393 | { } |
386 | }; | 394 | }; |
387 | 395 | ||
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index a9d5031b855e..ea30c983a33e 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -388,6 +388,32 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi | |||
388 | return result; | 388 | return result; |
389 | } | 389 | } |
390 | 390 | ||
391 | static int wacom_query_tablet_data(struct usb_interface *intf) | ||
392 | { | ||
393 | unsigned char *rep_data; | ||
394 | int limit = 0; | ||
395 | int error; | ||
396 | |||
397 | rep_data = kmalloc(2, GFP_KERNEL); | ||
398 | if (!rep_data) | ||
399 | return -ENOMEM; | ||
400 | |||
401 | do { | ||
402 | rep_data[0] = 2; | ||
403 | rep_data[1] = 2; | ||
404 | error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, | ||
405 | 2, rep_data, 2); | ||
406 | if (error >= 0) | ||
407 | error = usb_get_report(intf, | ||
408 | WAC_HID_FEATURE_REPORT, 2, | ||
409 | rep_data, 2); | ||
410 | } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); | ||
411 | |||
412 | kfree(rep_data); | ||
413 | |||
414 | return error < 0 ? error : 0; | ||
415 | } | ||
416 | |||
391 | static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) | 417 | static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) |
392 | { | 418 | { |
393 | struct usb_device *dev = interface_to_usbdev(intf); | 419 | struct usb_device *dev = interface_to_usbdev(intf); |
@@ -398,7 +424,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i | |||
398 | struct wacom_features *features; | 424 | struct wacom_features *features; |
399 | struct input_dev *input_dev; | 425 | struct input_dev *input_dev; |
400 | int error = -ENOMEM; | 426 | int error = -ENOMEM; |
401 | char rep_data[2], limit = 0; | ||
402 | struct hid_descriptor *hid_desc; | 427 | struct hid_descriptor *hid_desc; |
403 | 428 | ||
404 | wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); | 429 | wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); |
@@ -489,20 +514,10 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i | |||
489 | 514 | ||
490 | /* | 515 | /* |
491 | * Ask the tablet to report tablet data if it is not a Tablet PC. | 516 | * Ask the tablet to report tablet data if it is not a Tablet PC. |
492 | * Repeat until it succeeds | 517 | * Note that if query fails it is not a hard failure. |
493 | */ | 518 | */ |
494 | if (wacom_wac->features->type != TABLETPC) { | 519 | if (wacom_wac->features->type != TABLETPC) |
495 | do { | 520 | wacom_query_tablet_data(intf); |
496 | rep_data[0] = 2; | ||
497 | rep_data[1] = 2; | ||
498 | error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, | ||
499 | 2, rep_data, 2); | ||
500 | if (error >= 0) | ||
501 | error = usb_get_report(intf, | ||
502 | WAC_HID_FEATURE_REPORT, 2, | ||
503 | rep_data, 2); | ||
504 | } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); | ||
505 | } | ||
506 | 521 | ||
507 | usb_set_intfdata(intf, wacom); | 522 | usb_set_intfdata(intf, wacom); |
508 | return 0; | 523 | return 0; |
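wacom_probe() now delegates the "switch to tablet mode" handshake to wacom_query_tablet_data(), which keeps the two-byte report on the heap and retries up to five times until the tablet echoes mode 2, treating failure as non-fatal. A sketch of that bounded-retry shape, with a stub standing in for the usb_set_report()/usb_get_report() pair; the stub only acknowledges on the third attempt so the loop is exercised.

#include <stdio.h>
#include <stdlib.h>

/* Stub transfer: pretend the device only echoes mode 2 on the third try. */
static int set_and_get_report(unsigned char *rep, int attempt)
{
        rep[1] = (attempt >= 2) ? 2 : 0;
        return 0;                       /* 0 = transfer ok, <0 = error */
}

/* Retry the mode query until the device reports mode 2 or we give up
 * after five attempts, mirroring wacom_query_tablet_data(). */
static int query_tablet_data(void)
{
        unsigned char *rep_data;
        int limit = 0;
        int error;

        rep_data = malloc(2);
        if (!rep_data)
                return -1;

        do {
                rep_data[0] = 2;
                rep_data[1] = 2;
                error = set_and_get_report(rep_data, limit);
        } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);

        free(rep_data);
        return error < 0 ? error : 0;
}

int main(void)
{
        printf("query_tablet_data() = %d\n", query_tablet_data());
        return 0;
}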
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index 6954f5500108..3a7a58222f83 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c | |||
@@ -170,11 +170,11 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb) | |||
170 | ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr); | 170 | ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr); |
171 | ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); | 171 | ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); |
172 | 172 | ||
173 | if (isr & UCB_IE_TSPX) { | 173 | if (isr & UCB_IE_TSPX) |
174 | ucb1400_ts_irq_disable(ucb->ac97); | 174 | ucb1400_ts_irq_disable(ucb->ac97); |
175 | enable_irq(ucb->irq); | 175 | else |
176 | } else | 176 | dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr); |
177 | printk(KERN_ERR "ucb1400: unexpected IE_STATUS = %#x\n", isr); | 177 | enable_irq(ucb->irq); |
178 | } | 178 | } |
179 | 179 | ||
180 | static int ucb1400_ts_thread(void *_ucb) | 180 | static int ucb1400_ts_thread(void *_ucb) |
@@ -345,6 +345,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb) | |||
345 | static int ucb1400_ts_probe(struct platform_device *dev) | 345 | static int ucb1400_ts_probe(struct platform_device *dev) |
346 | { | 346 | { |
347 | int error, x_res, y_res; | 347 | int error, x_res, y_res; |
348 | u16 fcsr; | ||
348 | struct ucb1400_ts *ucb = dev->dev.platform_data; | 349 | struct ucb1400_ts *ucb = dev->dev.platform_data; |
349 | 350 | ||
350 | ucb->ts_idev = input_allocate_device(); | 351 | ucb->ts_idev = input_allocate_device(); |
@@ -382,6 +383,14 @@ static int ucb1400_ts_probe(struct platform_device *dev) | |||
382 | ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); | 383 | ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); |
383 | ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); | 384 | ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); |
384 | 385 | ||
386 | /* | ||
387 | * Enable ADC filter to prevent horrible jitter on Colibri. | ||
388 | * This also further reduces jitter on boards where ADCSYNC | ||
389 | * pin is connected. | ||
390 | */ | ||
391 | fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR); | ||
392 | ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE); | ||
393 | |||
385 | ucb1400_adc_enable(ucb->ac97); | 394 | ucb1400_adc_enable(ucb->ac97); |
386 | x_res = ucb1400_ts_read_xres(ucb); | 395 | x_res = ucb1400_ts_read_xres(ucb); |
387 | y_res = ucb1400_ts_read_yres(ucb); | 396 | y_res = ucb1400_ts_read_yres(ucb); |
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c index a247ae63374f..1bc5db4ece0d 100644 --- a/drivers/leds/ledtrig-gpio.c +++ b/drivers/leds/ledtrig-gpio.c | |||
@@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev, | |||
117 | 117 | ||
118 | gpio_data->inverted = !!inverted; | 118 | gpio_data->inverted = !!inverted; |
119 | 119 | ||
120 | /* After inverting, we need to update the LED. */ | ||
121 | schedule_work(&gpio_data->work); | ||
122 | |||
120 | return n; | 123 | return n; |
121 | } | 124 | } |
122 | static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show, | 125 | static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show, |
@@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev, | |||
146 | return -EINVAL; | 149 | return -EINVAL; |
147 | } | 150 | } |
148 | 151 | ||
152 | if (gpio_data->gpio == gpio) | ||
153 | return n; | ||
154 | |||
149 | if (!gpio) { | 155 | if (!gpio) { |
150 | free_irq(gpio_to_irq(gpio_data->gpio), led); | 156 | if (gpio_data->gpio != 0) |
157 | free_irq(gpio_to_irq(gpio_data->gpio), led); | ||
158 | gpio_data->gpio = 0; | ||
151 | return n; | 159 | return n; |
152 | } | 160 | } |
153 | 161 | ||
154 | if (gpio_data->gpio > 0 && gpio_data->gpio != gpio) | ||
155 | free_irq(gpio_to_irq(gpio_data->gpio), led); | ||
156 | |||
157 | gpio_data->gpio = gpio; | ||
158 | ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq, | 162 | ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq, |
159 | IRQF_SHARED | IRQF_TRIGGER_RISING | 163 | IRQF_SHARED | IRQF_TRIGGER_RISING |
160 | | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led); | 164 | | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led); |
161 | if (ret) | 165 | if (ret) { |
162 | dev_err(dev, "request_irq failed with error %d\n", ret); | 166 | dev_err(dev, "request_irq failed with error %d\n", ret); |
167 | } else { | ||
168 | if (gpio_data->gpio != 0) | ||
169 | free_irq(gpio_to_irq(gpio_data->gpio), led); | ||
170 | gpio_data->gpio = gpio; | ||
171 | } | ||
163 | 172 | ||
164 | return ret ? ret : n; | 173 | return ret ? ret : n; |
165 | } | 174 | } |
@@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led) | |||
211 | device_remove_file(led->dev, &dev_attr_inverted); | 220 | device_remove_file(led->dev, &dev_attr_inverted); |
212 | device_remove_file(led->dev, &dev_attr_desired_brightness); | 221 | device_remove_file(led->dev, &dev_attr_desired_brightness); |
213 | flush_work(&gpio_data->work); | 222 | flush_work(&gpio_data->work); |
214 | free_irq(gpio_to_irq(gpio_data->gpio),led); | 223 | if (gpio_data->gpio != 0) |
224 | free_irq(gpio_to_irq(gpio_data->gpio), led); | ||
215 | kfree(gpio_data); | 225 | kfree(gpio_data); |
216 | } | 226 | } |
217 | } | 227 | } |
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c index 4d686c0bdea0..9ab5b0c34f0d 100644 --- a/drivers/macintosh/via-maciisi.c +++ b/drivers/macintosh/via-maciisi.c | |||
@@ -288,7 +288,7 @@ static void maciisi_sync(struct adb_request *req) | |||
288 | } | 288 | } |
289 | /* This could be BAD... when the ADB controller doesn't respond | 289 | /* This could be BAD... when the ADB controller doesn't respond |
290 | * for this long, it's probably not coming back :-( */ | 290 | * for this long, it's probably not coming back :-( */ |
291 | if(count >= 50) /* Hopefully shouldn't happen */ | 291 | if (count > 50) /* Hopefully shouldn't happen */ |
292 | printk(KERN_ERR "maciisi_send_request: poll timed out!\n"); | 292 | printk(KERN_ERR "maciisi_send_request: poll timed out!\n"); |
293 | } | 293 | } |
294 | 294 | ||
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 3710ff88fc10..556acff3952f 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store, | |||
171 | */ | 171 | */ |
172 | chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); | 172 | chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); |
173 | 173 | ||
174 | return dm_exception_store_set_chunk_size(store, chunk_size_ulong, | ||
175 | error); | ||
176 | } | ||
177 | |||
178 | int dm_exception_store_set_chunk_size(struct dm_exception_store *store, | ||
179 | unsigned long chunk_size_ulong, | ||
180 | char **error) | ||
181 | { | ||
174 | /* Check chunk_size is a power of 2 */ | 182 | /* Check chunk_size is a power of 2 */ |
175 | if (!is_power_of_2(chunk_size_ulong)) { | 183 | if (!is_power_of_2(chunk_size_ulong)) { |
176 | *error = "Chunk size is not a power of 2"; | 184 | *error = "Chunk size is not a power of 2"; |
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store, | |||
183 | return -EINVAL; | 191 | return -EINVAL; |
184 | } | 192 | } |
185 | 193 | ||
194 | if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) { | ||
195 | *error = "Chunk size is too high"; | ||
196 | return -EINVAL; | ||
197 | } | ||
198 | |||
186 | store->chunk_size = chunk_size_ulong; | 199 | store->chunk_size = chunk_size_ulong; |
187 | store->chunk_mask = chunk_size_ulong - 1; | 200 | store->chunk_mask = chunk_size_ulong - 1; |
188 | store->chunk_shift = ffs(chunk_size_ulong) - 1; | 201 | store->chunk_shift = ffs(chunk_size_ulong) - 1; |
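dm_exception_store_set_chunk_size() now also rejects chunk sizes whose byte count would overflow an int; chunk_size is counted in 512-byte sectors, hence the INT_MAX >> SECTOR_SHIFT limit. A standalone version of the two checks visible in this hunk, assuming SECTOR_SHIFT is 9 as elsewhere in the block layer.

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9          /* chunk_size is counted in 512-byte sectors */

static int is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* Mirror two of the validations in dm_exception_store_set_chunk_size():
 * the size must be a power of two, and it must be small enough that the
 * byte count (chunk_size << SECTOR_SHIFT) still fits in an int. */
static const char *check_chunk_size(unsigned long chunk_size)
{
        if (!is_power_of_2(chunk_size))
                return "Chunk size is not a power of 2";
        if (chunk_size > INT_MAX >> SECTOR_SHIFT)
                return "Chunk size is too high";
        return "ok";
}

int main(void)
{
        printf("16 sectors:      %s\n", check_chunk_size(16));
        printf("24 sectors:      %s\n", check_chunk_size(24));
        printf("1UL<<31 sectors: %s\n", check_chunk_size(1UL << 31));
        return 0;
}

Splitting the checks into the exported dm_exception_store_set_chunk_size() lets other callers validate a chunk size without going through the constructor-string path in set_chunk_size().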
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 2442c8c07898..812c71872ba0 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h | |||
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store, | |||
168 | int dm_exception_store_type_register(struct dm_exception_store_type *type); | 168 | int dm_exception_store_type_register(struct dm_exception_store_type *type); |
169 | int dm_exception_store_type_unregister(struct dm_exception_store_type *type); | 169 | int dm_exception_store_type_unregister(struct dm_exception_store_type *type); |
170 | 170 | ||
171 | int dm_exception_store_set_chunk_size(struct dm_exception_store *store, | ||
172 | unsigned long chunk_size_ulong, | ||
173 | char **error); | ||
174 | |||
171 | int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | 175 | int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, |
172 | unsigned *args_used, | 176 | unsigned *args_used, |
173 | struct dm_exception_store **store); | 177 | struct dm_exception_store **store); |
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index e69b96560997..652bd33109e3 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c | |||
@@ -21,6 +21,7 @@ struct log_c { | |||
21 | struct dm_target *ti; | 21 | struct dm_target *ti; |
22 | uint32_t region_size; | 22 | uint32_t region_size; |
23 | region_t region_count; | 23 | region_t region_count; |
24 | uint64_t luid; | ||
24 | char uuid[DM_UUID_LEN]; | 25 | char uuid[DM_UUID_LEN]; |
25 | 26 | ||
26 | char *usr_argv_str; | 27 | char *usr_argv_str; |
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid, | |||
63 | * restored. | 64 | * restored. |
64 | */ | 65 | */ |
65 | retry: | 66 | retry: |
66 | r = dm_consult_userspace(uuid, request_type, data, | 67 | r = dm_consult_userspace(uuid, lc->luid, request_type, data, |
67 | data_size, rdata, rdata_size); | 68 | data_size, rdata, rdata_size); |
68 | 69 | ||
69 | if (r != -ESRCH) | 70 | if (r != -ESRCH) |
@@ -74,14 +75,15 @@ retry: | |||
74 | set_current_state(TASK_INTERRUPTIBLE); | 75 | set_current_state(TASK_INTERRUPTIBLE); |
75 | schedule_timeout(2*HZ); | 76 | schedule_timeout(2*HZ); |
76 | DMWARN("Attempting to contact userspace log server..."); | 77 | DMWARN("Attempting to contact userspace log server..."); |
77 | r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, | 78 | r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR, |
79 | lc->usr_argv_str, | ||
78 | strlen(lc->usr_argv_str) + 1, | 80 | strlen(lc->usr_argv_str) + 1, |
79 | NULL, NULL); | 81 | NULL, NULL); |
80 | if (!r) | 82 | if (!r) |
81 | break; | 83 | break; |
82 | } | 84 | } |
83 | DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); | 85 | DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); |
84 | r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, | 86 | r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL, |
85 | 0, NULL, NULL); | 87 | 0, NULL, NULL); |
86 | if (!r) | 88 | if (!r) |
87 | goto retry; | 89 | goto retry; |
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti, | |||
111 | return -ENOMEM; | 113 | return -ENOMEM; |
112 | } | 114 | } |
113 | 115 | ||
114 | for (i = 0, str_size = 0; i < argc; i++) | 116 | str_size = sprintf(str, "%llu", (unsigned long long)ti->len); |
115 | str_size += sprintf(str + str_size, "%s ", argv[i]); | 117 | for (i = 0; i < argc; i++) |
116 | str_size += sprintf(str + str_size, "%llu", | 118 | str_size += sprintf(str + str_size, " %s", argv[i]); |
117 | (unsigned long long)ti->len); | ||
118 | 119 | ||
119 | *ctr_str = str; | 120 | *ctr_str = str; |
120 | return str_size; | 121 | return str_size; |
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, | |||
154 | return -ENOMEM; | 155 | return -ENOMEM; |
155 | } | 156 | } |
156 | 157 | ||
158 | /* The ptr value is sufficient for local unique id */ | ||
159 | lc->luid = (uint64_t)lc; | ||
160 | |||
157 | lc->ti = ti; | 161 | lc->ti = ti; |
158 | 162 | ||
159 | if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { | 163 | if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { |
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, | |||
173 | } | 177 | } |
174 | 178 | ||
175 | /* Send table string */ | 179 | /* Send table string */ |
176 | r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, | 180 | r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR, |
177 | ctr_str, str_size, NULL, NULL); | 181 | ctr_str, str_size, NULL, NULL); |
178 | 182 | ||
179 | if (r == -ESRCH) { | 183 | if (r == -ESRCH) { |
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, | |||
183 | 187 | ||
184 | /* Since the region size does not change, get it now */ | 188 | /* Since the region size does not change, get it now */ |
185 | rdata_size = sizeof(rdata); | 189 | rdata_size = sizeof(rdata); |
186 | r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, | 190 | r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE, |
187 | NULL, 0, (char *)&rdata, &rdata_size); | 191 | NULL, 0, (char *)&rdata, &rdata_size); |
188 | 192 | ||
189 | if (r) { | 193 | if (r) { |
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log) | |||
212 | int r; | 216 | int r; |
213 | struct log_c *lc = log->context; | 217 | struct log_c *lc = log->context; |
214 | 218 | ||
215 | r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, | 219 | r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, |
216 | NULL, 0, | 220 | NULL, 0, |
217 | NULL, NULL); | 221 | NULL, NULL); |
218 | 222 | ||
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log) | |||
227 | int r; | 231 | int r; |
228 | struct log_c *lc = log->context; | 232 | struct log_c *lc = log->context; |
229 | 233 | ||
230 | r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, | 234 | r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND, |
231 | NULL, 0, | 235 | NULL, 0, |
232 | NULL, NULL); | 236 | NULL, NULL); |
233 | 237 | ||
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log) | |||
239 | int r; | 243 | int r; |
240 | struct log_c *lc = log->context; | 244 | struct log_c *lc = log->context; |
241 | 245 | ||
242 | r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, | 246 | r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND, |
243 | NULL, 0, | 247 | NULL, 0, |
244 | NULL, NULL); | 248 | NULL, NULL); |
245 | 249 | ||
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log) | |||
252 | struct log_c *lc = log->context; | 256 | struct log_c *lc = log->context; |
253 | 257 | ||
254 | lc->in_sync_hint = 0; | 258 | lc->in_sync_hint = 0; |
255 | r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, | 259 | r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME, |
256 | NULL, 0, | 260 | NULL, 0, |
257 | NULL, NULL); | 261 | NULL, NULL); |
258 | 262 | ||
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type, | |||
561 | char *result, unsigned maxlen) | 565 | char *result, unsigned maxlen) |
562 | { | 566 | { |
563 | int r = 0; | 567 | int r = 0; |
568 | char *table_args; | ||
564 | size_t sz = (size_t)maxlen; | 569 | size_t sz = (size_t)maxlen; |
565 | struct log_c *lc = log->context; | 570 | struct log_c *lc = log->context; |
566 | 571 | ||
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type, | |||
577 | break; | 582 | break; |
578 | case STATUSTYPE_TABLE: | 583 | case STATUSTYPE_TABLE: |
579 | sz = 0; | 584 | sz = 0; |
580 | DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1, | 585 | table_args = strchr(lc->usr_argv_str, ' '); |
581 | lc->uuid, lc->usr_argv_str); | 586 | BUG_ON(!table_args); /* There will always be a ' ' */ |
587 | table_args++; | ||
588 | |||
589 | DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc, | ||
590 | lc->uuid, table_args); | ||
582 | break; | 591 | break; |
583 | } | 592 | } |
584 | return (r) ? 0 : (int)sz; | 593 | return (r) ? 0 : (int)sz; |
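Two related changes above: build_constructor_string() now emits the device length first and then the user arguments, and STATUSTYPE_TABLE walks past that first token with strchr() so only the original arguments are reported. A small userspace sketch of the round trip (all values made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char ctr_str[64];
	unsigned long long ti_len = 204800;	/* device length in sectors */
	const char *argv[] = { "my-log-uuid", "clustered-disk" };
	int argc = 2, i, sz;

	/* length first, then each argument prefixed by a space */
	sz = sprintf(ctr_str, "%llu", ti_len);
	for (i = 0; i < argc; i++)
		sz += sprintf(ctr_str + sz, " %s", argv[i]);

	/* STATUSTYPE_TABLE: skip the length token, report only the args */
	char *table_args = strchr(ctr_str, ' ');
	printf("ctr: \"%s\"\ntable args: \"%s\"\n",
	       ctr_str, table_args ? table_args + 1 : "");
	return 0;
}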
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 0ca1ee768a1f..ba0edad2d048 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c | |||
@@ -108,7 +108,7 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr) | |||
108 | *(pkg->data_size) = 0; | 108 | *(pkg->data_size) = 0; |
109 | } else if (tfr->data_size > *(pkg->data_size)) { | 109 | } else if (tfr->data_size > *(pkg->data_size)) { |
110 | DMERR("Insufficient space to receive package [%u] " | 110 | DMERR("Insufficient space to receive package [%u] " |
111 | "(%u vs %lu)", tfr->request_type, | 111 | "(%u vs %zu)", tfr->request_type, |
112 | tfr->data_size, *(pkg->data_size)); | 112 | tfr->data_size, *(pkg->data_size)); |
113 | 113 | ||
114 | *(pkg->data_size) = 0; | 114 | *(pkg->data_size) = 0; |
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data) | |||
147 | 147 | ||
148 | /** | 148 | /** |
149 | * dm_consult_userspace | 149 | * dm_consult_userspace |
150 | * @uuid: log's uuid (must be DM_UUID_LEN in size) | 150 | * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size) |
151 | * @luid: log's local unique identifier | ||
151 | * @request_type: found in include/linux/dm-log-userspace.h | 152 | * @request_type: found in include/linux/dm-log-userspace.h |
152 | * @data: data to tx to the server | 153 | * @data: data to tx to the server |
153 | * @data_size: size of data in bytes | 154 | * @data_size: size of data in bytes |
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data) | |||
163 | * | 164 | * |
164 | * Returns: 0 on success, -EXXX on failure | 165 | * Returns: 0 on success, -EXXX on failure |
165 | **/ | 166 | **/ |
166 | int dm_consult_userspace(const char *uuid, int request_type, | 167 | int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, |
167 | char *data, size_t data_size, | 168 | char *data, size_t data_size, |
168 | char *rdata, size_t *rdata_size) | 169 | char *rdata, size_t *rdata_size) |
169 | { | 170 | { |
@@ -190,6 +191,7 @@ resend: | |||
190 | 191 | ||
191 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); | 192 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); |
192 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); | 193 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); |
194 | tfr->luid = luid; | ||
193 | tfr->seq = dm_ulog_seq++; | 195 | tfr->seq = dm_ulog_seq++; |
194 | 196 | ||
195 | /* | 197 | /* |
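The extra luid argument threaded through dm_consult_userspace() above carries a per-instance identifier alongside the uuid string; userspace_ctr() simply reuses the log_c pointer value for it, presumably so two in-kernel instances that happen to share a uuid (for example across a table reload) can still be told apart. Illustrative only:

#include <stdint.h>
#include <stdio.h>

struct log_instance { int dummy; };

int main(void)
{
	struct log_instance old_log, new_log;

	/* same idea as lc->luid = (uint64_t)lc: the address is unique
	 * for the lifetime of the object, even if uuids collide */
	uint64_t old_luid = (uint64_t)(uintptr_t)&old_log;
	uint64_t new_luid = (uint64_t)(uintptr_t)&new_log;

	printf("distinct: %d\n", old_luid != new_luid);
	return 0;
}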
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h index c26d8e4e2710..04ee874f9153 100644 --- a/drivers/md/dm-log-userspace-transfer.h +++ b/drivers/md/dm-log-userspace-transfer.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | int dm_ulog_tfr_init(void); | 12 | int dm_ulog_tfr_init(void); |
13 | void dm_ulog_tfr_exit(void); | 13 | void dm_ulog_tfr_exit(void); |
14 | int dm_consult_userspace(const char *uuid, int request_type, | 14 | int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, |
15 | char *data, size_t data_size, | 15 | char *data, size_t data_size, |
16 | char *rdata, size_t *rdata_size); | 16 | char *rdata, size_t *rdata_size); |
17 | 17 | ||
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9726577cde49..33f179e66bf5 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
648 | */ | 648 | */ |
649 | dm_rh_inc_pending(ms->rh, &sync); | 649 | dm_rh_inc_pending(ms->rh, &sync); |
650 | dm_rh_inc_pending(ms->rh, &nosync); | 650 | dm_rh_inc_pending(ms->rh, &nosync); |
651 | ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; | 651 | |
652 | /* | ||
653 | * If the flush fails on a previous call and succeeds here, | ||
654 | * we must not reset the log_failure variable. We need | ||
655 | * userspace interaction to do that. | ||
656 | */ | ||
657 | ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure; | ||
652 | 658 | ||
653 | /* | 659 | /* |
654 | * Dispatch io. | 660 | * Dispatch io. |
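The one-line change above turns log_failure into a sticky latch: a flush failure sets it, but a later successful flush no longer clears it (per the new comment, that is left to userspace). The idiom in isolation:

/* before: log_failure = flush_error ? 1 : 0;   -- accidentally cleared */
/* after:  once set, only an explicit external action resets the flag  */
static int log_failure;

static void note_flush_result(int flush_error)
{
	log_failure = flush_error ? 1 : log_failure;
}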
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 6e3fe4f14934..d5b2e08750d5 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -106,6 +106,13 @@ struct pstore { | |||
106 | void *zero_area; | 106 | void *zero_area; |
107 | 107 | ||
108 | /* | 108 | /* |
109 | * An area used for header. The header can be written | ||
110 | * concurrently with metadata (when invalidating the snapshot), | ||
111 | * so it needs a separate buffer. | ||
112 | */ | ||
113 | void *header_area; | ||
114 | |||
115 | /* | ||
109 | * Used to keep track of which metadata area the data in | 116 | * Used to keep track of which metadata area the data in |
110 | * 'chunk' refers to. | 117 | * 'chunk' refers to. |
111 | */ | 118 | */ |
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps) | |||
148 | */ | 155 | */ |
149 | ps->area = vmalloc(len); | 156 | ps->area = vmalloc(len); |
150 | if (!ps->area) | 157 | if (!ps->area) |
151 | return r; | 158 | goto err_area; |
152 | 159 | ||
153 | ps->zero_area = vmalloc(len); | 160 | ps->zero_area = vmalloc(len); |
154 | if (!ps->zero_area) { | 161 | if (!ps->zero_area) |
155 | vfree(ps->area); | 162 | goto err_zero_area; |
156 | return r; | ||
157 | } | ||
158 | memset(ps->zero_area, 0, len); | 163 | memset(ps->zero_area, 0, len); |
159 | 164 | ||
165 | ps->header_area = vmalloc(len); | ||
166 | if (!ps->header_area) | ||
167 | goto err_header_area; | ||
168 | |||
160 | return 0; | 169 | return 0; |
170 | |||
171 | err_header_area: | ||
172 | vfree(ps->zero_area); | ||
173 | |||
174 | err_zero_area: | ||
175 | vfree(ps->area); | ||
176 | |||
177 | err_area: | ||
178 | return r; | ||
161 | } | 179 | } |
162 | 180 | ||
163 | static void free_area(struct pstore *ps) | 181 | static void free_area(struct pstore *ps) |
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps) | |||
169 | if (ps->zero_area) | 187 | if (ps->zero_area) |
170 | vfree(ps->zero_area); | 188 | vfree(ps->zero_area); |
171 | ps->zero_area = NULL; | 189 | ps->zero_area = NULL; |
190 | |||
191 | if (ps->header_area) | ||
192 | vfree(ps->header_area); | ||
193 | ps->header_area = NULL; | ||
172 | } | 194 | } |
173 | 195 | ||
174 | struct mdata_req { | 196 | struct mdata_req { |
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work) | |||
188 | /* | 210 | /* |
189 | * Read or write a chunk aligned and sized block of data from a device. | 211 | * Read or write a chunk aligned and sized block of data from a device. |
190 | */ | 212 | */ |
191 | static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) | 213 | static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, |
214 | int metadata) | ||
192 | { | 215 | { |
193 | struct dm_io_region where = { | 216 | struct dm_io_region where = { |
194 | .bdev = ps->store->cow->bdev, | 217 | .bdev = ps->store->cow->bdev, |
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) | |||
198 | struct dm_io_request io_req = { | 221 | struct dm_io_request io_req = { |
199 | .bi_rw = rw, | 222 | .bi_rw = rw, |
200 | .mem.type = DM_IO_VMA, | 223 | .mem.type = DM_IO_VMA, |
201 | .mem.ptr.vma = ps->area, | 224 | .mem.ptr.vma = area, |
202 | .client = ps->io_client, | 225 | .client = ps->io_client, |
203 | .notify.fn = NULL, | 226 | .notify.fn = NULL, |
204 | }; | 227 | }; |
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw) | |||
240 | 263 | ||
241 | chunk = area_location(ps, ps->current_area); | 264 | chunk = area_location(ps, ps->current_area); |
242 | 265 | ||
243 | r = chunk_io(ps, chunk, rw, 0); | 266 | r = chunk_io(ps, ps->area, chunk, rw, 0); |
244 | if (r) | 267 | if (r) |
245 | return r; | 268 | return r; |
246 | 269 | ||
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps) | |||
254 | 277 | ||
255 | static int zero_disk_area(struct pstore *ps, chunk_t area) | 278 | static int zero_disk_area(struct pstore *ps, chunk_t area) |
256 | { | 279 | { |
257 | struct dm_io_region where = { | 280 | return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0); |
258 | .bdev = ps->store->cow->bdev, | ||
259 | .sector = ps->store->chunk_size * area_location(ps, area), | ||
260 | .count = ps->store->chunk_size, | ||
261 | }; | ||
262 | struct dm_io_request io_req = { | ||
263 | .bi_rw = WRITE, | ||
264 | .mem.type = DM_IO_VMA, | ||
265 | .mem.ptr.vma = ps->zero_area, | ||
266 | .client = ps->io_client, | ||
267 | .notify.fn = NULL, | ||
268 | }; | ||
269 | |||
270 | return dm_io(&io_req, 1, &where, NULL); | ||
271 | } | 281 | } |
272 | 282 | ||
273 | static int read_header(struct pstore *ps, int *new_snapshot) | 283 | static int read_header(struct pstore *ps, int *new_snapshot) |
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) | |||
276 | struct disk_header *dh; | 286 | struct disk_header *dh; |
277 | chunk_t chunk_size; | 287 | chunk_t chunk_size; |
278 | int chunk_size_supplied = 1; | 288 | int chunk_size_supplied = 1; |
289 | char *chunk_err; | ||
279 | 290 | ||
280 | /* | 291 | /* |
281 | * Use default chunk size (or hardsect_size, if larger) if none supplied | 292 | * Use default chunk size (or hardsect_size, if larger) if none supplied |
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot) | |||
297 | if (r) | 308 | if (r) |
298 | return r; | 309 | return r; |
299 | 310 | ||
300 | r = chunk_io(ps, 0, READ, 1); | 311 | r = chunk_io(ps, ps->header_area, 0, READ, 1); |
301 | if (r) | 312 | if (r) |
302 | goto bad; | 313 | goto bad; |
303 | 314 | ||
304 | dh = (struct disk_header *) ps->area; | 315 | dh = ps->header_area; |
305 | 316 | ||
306 | if (le32_to_cpu(dh->magic) == 0) { | 317 | if (le32_to_cpu(dh->magic) == 0) { |
307 | *new_snapshot = 1; | 318 | *new_snapshot = 1; |
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot) | |||
319 | ps->version = le32_to_cpu(dh->version); | 330 | ps->version = le32_to_cpu(dh->version); |
320 | chunk_size = le32_to_cpu(dh->chunk_size); | 331 | chunk_size = le32_to_cpu(dh->chunk_size); |
321 | 332 | ||
322 | if (!chunk_size_supplied || ps->store->chunk_size == chunk_size) | 333 | if (ps->store->chunk_size == chunk_size) |
323 | return 0; | 334 | return 0; |
324 | 335 | ||
325 | DMWARN("chunk size %llu in device metadata overrides " | 336 | if (chunk_size_supplied) |
326 | "table chunk size of %llu.", | 337 | DMWARN("chunk size %llu in device metadata overrides " |
327 | (unsigned long long)chunk_size, | 338 | "table chunk size of %llu.", |
328 | (unsigned long long)ps->store->chunk_size); | 339 | (unsigned long long)chunk_size, |
340 | (unsigned long long)ps->store->chunk_size); | ||
329 | 341 | ||
330 | /* We had a bogus chunk_size. Fix stuff up. */ | 342 | /* We had a bogus chunk_size. Fix stuff up. */ |
331 | free_area(ps); | 343 | free_area(ps); |
332 | 344 | ||
333 | ps->store->chunk_size = chunk_size; | 345 | r = dm_exception_store_set_chunk_size(ps->store, chunk_size, |
334 | ps->store->chunk_mask = chunk_size - 1; | 346 | &chunk_err); |
335 | ps->store->chunk_shift = ffs(chunk_size) - 1; | 347 | if (r) { |
348 | DMERR("invalid on-disk chunk size %llu: %s.", | ||
349 | (unsigned long long)chunk_size, chunk_err); | ||
350 | return r; | ||
351 | } | ||
336 | 352 | ||
337 | r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), | 353 | r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), |
338 | ps->io_client); | 354 | ps->io_client); |
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps) | |||
351 | { | 367 | { |
352 | struct disk_header *dh; | 368 | struct disk_header *dh; |
353 | 369 | ||
354 | memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); | 370 | memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT); |
355 | 371 | ||
356 | dh = (struct disk_header *) ps->area; | 372 | dh = ps->header_area; |
357 | dh->magic = cpu_to_le32(SNAP_MAGIC); | 373 | dh->magic = cpu_to_le32(SNAP_MAGIC); |
358 | dh->valid = cpu_to_le32(ps->valid); | 374 | dh->valid = cpu_to_le32(ps->valid); |
359 | dh->version = cpu_to_le32(ps->version); | 375 | dh->version = cpu_to_le32(ps->version); |
360 | dh->chunk_size = cpu_to_le32(ps->store->chunk_size); | 376 | dh->chunk_size = cpu_to_le32(ps->store->chunk_size); |
361 | 377 | ||
362 | return chunk_io(ps, 0, WRITE, 1); | 378 | return chunk_io(ps, ps->header_area, 0, WRITE, 1); |
363 | } | 379 | } |
364 | 380 | ||
365 | /* | 381 | /* |
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store, | |||
679 | ps->valid = 1; | 695 | ps->valid = 1; |
680 | ps->version = SNAPSHOT_DISK_VERSION; | 696 | ps->version = SNAPSHOT_DISK_VERSION; |
681 | ps->area = NULL; | 697 | ps->area = NULL; |
698 | ps->zero_area = NULL; | ||
699 | ps->header_area = NULL; | ||
682 | ps->next_free = 2; /* skipping the header and first area */ | 700 | ps->next_free = 2; /* skipping the header and first area */ |
683 | ps->current_committed = 0; | 701 | ps->current_committed = 0; |
684 | 702 | ||
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index d573165cd2b7..57f1bf7f3b7a 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, | |||
1176 | return 0; | 1176 | return 0; |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | static int snapshot_iterate_devices(struct dm_target *ti, | ||
1180 | iterate_devices_callout_fn fn, void *data) | ||
1181 | { | ||
1182 | struct dm_snapshot *snap = ti->private; | ||
1183 | |||
1184 | return fn(ti, snap->origin, 0, ti->len, data); | ||
1185 | } | ||
1186 | |||
1187 | |||
1179 | /*----------------------------------------------------------------- | 1188 | /*----------------------------------------------------------------- |
1180 | * Origin methods | 1189 | * Origin methods |
1181 | *---------------------------------------------------------------*/ | 1190 | *---------------------------------------------------------------*/ |
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result, | |||
1410 | return 0; | 1419 | return 0; |
1411 | } | 1420 | } |
1412 | 1421 | ||
1422 | static int origin_iterate_devices(struct dm_target *ti, | ||
1423 | iterate_devices_callout_fn fn, void *data) | ||
1424 | { | ||
1425 | struct dm_dev *dev = ti->private; | ||
1426 | |||
1427 | return fn(ti, dev, 0, ti->len, data); | ||
1428 | } | ||
1429 | |||
1413 | static struct target_type origin_target = { | 1430 | static struct target_type origin_target = { |
1414 | .name = "snapshot-origin", | 1431 | .name = "snapshot-origin", |
1415 | .version = {1, 6, 0}, | 1432 | .version = {1, 7, 0}, |
1416 | .module = THIS_MODULE, | 1433 | .module = THIS_MODULE, |
1417 | .ctr = origin_ctr, | 1434 | .ctr = origin_ctr, |
1418 | .dtr = origin_dtr, | 1435 | .dtr = origin_dtr, |
1419 | .map = origin_map, | 1436 | .map = origin_map, |
1420 | .resume = origin_resume, | 1437 | .resume = origin_resume, |
1421 | .status = origin_status, | 1438 | .status = origin_status, |
1439 | .iterate_devices = origin_iterate_devices, | ||
1422 | }; | 1440 | }; |
1423 | 1441 | ||
1424 | static struct target_type snapshot_target = { | 1442 | static struct target_type snapshot_target = { |
1425 | .name = "snapshot", | 1443 | .name = "snapshot", |
1426 | .version = {1, 6, 0}, | 1444 | .version = {1, 7, 0}, |
1427 | .module = THIS_MODULE, | 1445 | .module = THIS_MODULE, |
1428 | .ctr = snapshot_ctr, | 1446 | .ctr = snapshot_ctr, |
1429 | .dtr = snapshot_dtr, | 1447 | .dtr = snapshot_dtr, |
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = { | |||
1431 | .end_io = snapshot_end_io, | 1449 | .end_io = snapshot_end_io, |
1432 | .resume = snapshot_resume, | 1450 | .resume = snapshot_resume, |
1433 | .status = snapshot_status, | 1451 | .status = snapshot_status, |
1452 | .iterate_devices = snapshot_iterate_devices, | ||
1434 | }; | 1453 | }; |
1435 | 1454 | ||
1436 | static int __init dm_snapshot_init(void) | 1455 | static int __init dm_snapshot_init(void) |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 4e0e5937e42a..3e563d251733 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti, | |||
329 | return ret; | 329 | return ret; |
330 | } | 330 | } |
331 | 331 | ||
332 | static void stripe_io_hints(struct dm_target *ti, | ||
333 | struct queue_limits *limits) | ||
334 | { | ||
335 | struct stripe_c *sc = ti->private; | ||
336 | unsigned chunk_size = (sc->chunk_mask + 1) << 9; | ||
337 | |||
338 | blk_limits_io_min(limits, chunk_size); | ||
339 | limits->io_opt = chunk_size * sc->stripes; | ||
340 | } | ||
341 | |||
332 | static struct target_type stripe_target = { | 342 | static struct target_type stripe_target = { |
333 | .name = "striped", | 343 | .name = "striped", |
334 | .version = {1, 2, 0}, | 344 | .version = {1, 3, 0}, |
335 | .module = THIS_MODULE, | 345 | .module = THIS_MODULE, |
336 | .ctr = stripe_ctr, | 346 | .ctr = stripe_ctr, |
337 | .dtr = stripe_dtr, | 347 | .dtr = stripe_dtr, |
@@ -339,6 +349,7 @@ static struct target_type stripe_target = { | |||
339 | .end_io = stripe_end_io, | 349 | .end_io = stripe_end_io, |
340 | .status = stripe_status, | 350 | .status = stripe_status, |
341 | .iterate_devices = stripe_iterate_devices, | 351 | .iterate_devices = stripe_iterate_devices, |
352 | .io_hints = stripe_io_hints, | ||
342 | }; | 353 | }; |
343 | 354 | ||
344 | int __init dm_stripe_init(void) | 355 | int __init dm_stripe_init(void) |
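stripe_io_hints() above derives byte-granular hints from the sector-based chunk mask: chunk_mask + 1 is the chunk size in 512-byte sectors, << 9 converts to bytes, and io_opt is one full stripe across all members. Worked numbers:

#include <stdio.h>

int main(void)
{
	unsigned chunk_mask = 127;	/* 128 sectors per chunk = 64 KiB */
	unsigned stripes = 4;
	unsigned chunk_bytes = (chunk_mask + 1) << 9;

	printf("io_min=%u bytes, io_opt=%u bytes\n",
	       chunk_bytes, chunk_bytes * stripes);	/* 65536, 262144 */
	return 0;
}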
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d952b3441913..1a6cb3c7822e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) | |||
343 | } | 343 | } |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * If possible, this checks an area of a destination device is valid. | 346 | * If possible, this checks an area of a destination device is invalid. |
347 | */ | 347 | */ |
348 | static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, | 348 | static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, |
349 | sector_t start, sector_t len, void *data) | 349 | sector_t start, sector_t len, void *data) |
350 | { | 350 | { |
351 | struct queue_limits *limits = data; | 351 | struct queue_limits *limits = data; |
352 | struct block_device *bdev = dev->bdev; | 352 | struct block_device *bdev = dev->bdev; |
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, | |||
357 | char b[BDEVNAME_SIZE]; | 357 | char b[BDEVNAME_SIZE]; |
358 | 358 | ||
359 | if (!dev_size) | 359 | if (!dev_size) |
360 | return 1; | 360 | return 0; |
361 | 361 | ||
362 | if ((start >= dev_size) || (start + len > dev_size)) { | 362 | if ((start >= dev_size) || (start + len > dev_size)) { |
363 | DMWARN("%s: %s too small for target", | 363 | DMWARN("%s: %s too small for target: " |
364 | dm_device_name(ti->table->md), bdevname(bdev, b)); | 364 | "start=%llu, len=%llu, dev_size=%llu", |
365 | return 0; | 365 | dm_device_name(ti->table->md), bdevname(bdev, b), |
366 | (unsigned long long)start, | ||
367 | (unsigned long long)len, | ||
368 | (unsigned long long)dev_size); | ||
369 | return 1; | ||
366 | } | 370 | } |
367 | 371 | ||
368 | if (logical_block_size_sectors <= 1) | 372 | if (logical_block_size_sectors <= 1) |
369 | return 1; | 373 | return 0; |
370 | 374 | ||
371 | if (start & (logical_block_size_sectors - 1)) { | 375 | if (start & (logical_block_size_sectors - 1)) { |
372 | DMWARN("%s: start=%llu not aligned to h/w " | 376 | DMWARN("%s: start=%llu not aligned to h/w " |
373 | "logical block size %hu of %s", | 377 | "logical block size %u of %s", |
374 | dm_device_name(ti->table->md), | 378 | dm_device_name(ti->table->md), |
375 | (unsigned long long)start, | 379 | (unsigned long long)start, |
376 | limits->logical_block_size, bdevname(bdev, b)); | 380 | limits->logical_block_size, bdevname(bdev, b)); |
377 | return 0; | 381 | return 1; |
378 | } | 382 | } |
379 | 383 | ||
380 | if (len & (logical_block_size_sectors - 1)) { | 384 | if (len & (logical_block_size_sectors - 1)) { |
381 | DMWARN("%s: len=%llu not aligned to h/w " | 385 | DMWARN("%s: len=%llu not aligned to h/w " |
382 | "logical block size %hu of %s", | 386 | "logical block size %u of %s", |
383 | dm_device_name(ti->table->md), | 387 | dm_device_name(ti->table->md), |
384 | (unsigned long long)len, | 388 | (unsigned long long)len, |
385 | limits->logical_block_size, bdevname(bdev, b)); | 389 | limits->logical_block_size, bdevname(bdev, b)); |
386 | return 0; | 390 | return 1; |
387 | } | 391 | } |
388 | 392 | ||
389 | return 1; | 393 | return 0; |
390 | } | 394 | } |
391 | 395 | ||
392 | /* | 396 | /* |
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | |||
496 | } | 500 | } |
497 | 501 | ||
498 | if (blk_stack_limits(limits, &q->limits, start << 9) < 0) | 502 | if (blk_stack_limits(limits, &q->limits, start << 9) < 0) |
499 | DMWARN("%s: target device %s is misaligned", | 503 | DMWARN("%s: target device %s is misaligned: " |
500 | dm_device_name(ti->table->md), bdevname(bdev, b)); | 504 | "physical_block_size=%u, logical_block_size=%u, " |
505 | "alignment_offset=%u, start=%llu", | ||
506 | dm_device_name(ti->table->md), bdevname(bdev, b), | ||
507 | q->limits.physical_block_size, | ||
508 | q->limits.logical_block_size, | ||
509 | q->limits.alignment_offset, | ||
510 | (unsigned long long) start << 9); | ||
511 | |||
501 | 512 | ||
502 | /* | 513 | /* |
503 | * Check if merge fn is supported. | 514 | * Check if merge fn is supported. |
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, | |||
698 | 709 | ||
699 | if (remaining) { | 710 | if (remaining) { |
700 | DMWARN("%s: table line %u (start sect %llu len %llu) " | 711 | DMWARN("%s: table line %u (start sect %llu len %llu) " |
701 | "not aligned to h/w logical block size %hu", | 712 | "not aligned to h/w logical block size %u", |
702 | dm_device_name(table->md), i, | 713 | dm_device_name(table->md), i, |
703 | (unsigned long long) ti->begin, | 714 | (unsigned long long) ti->begin, |
704 | (unsigned long long) ti->len, | 715 | (unsigned long long) ti->len, |
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table, | |||
996 | ti->type->iterate_devices(ti, dm_set_device_limits, | 1007 | ti->type->iterate_devices(ti, dm_set_device_limits, |
997 | &ti_limits); | 1008 | &ti_limits); |
998 | 1009 | ||
1010 | /* Set I/O hints portion of queue limits */ | ||
1011 | if (ti->type->io_hints) | ||
1012 | ti->type->io_hints(ti, &ti_limits); | ||
1013 | |||
999 | /* | 1014 | /* |
1000 | * Check each device area is consistent with the target's | 1015 | * Check each device area is consistent with the target's |
1001 | * overall queue limits. | 1016 | * overall queue limits. |
1002 | */ | 1017 | */ |
1003 | if (!ti->type->iterate_devices(ti, device_area_is_valid, | 1018 | if (ti->type->iterate_devices(ti, device_area_is_invalid, |
1004 | &ti_limits)) | 1019 | &ti_limits)) |
1005 | return -EINVAL; | 1020 | return -EINVAL; |
1006 | 1021 | ||
1007 | combine_limits: | 1022 | combine_limits: |
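Note the sense inversion above: device_area_is_valid() becomes device_area_is_invalid(), so dm_calculate_queue_limits() can hand it straight to iterate_devices() and let a non-zero (invalid) result propagate out as -EINVAL. The alignment tests themselves are plain power-of-two masking against the logical block size expressed in sectors; a quick standalone check with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long start = 63, len = 1024;	/* in 512-byte sectors */
	unsigned lbs_bytes = 4096;			/* logical block size  */
	unsigned lbs_sectors = lbs_bytes >> 9;

	if (start & (lbs_sectors - 1))
		printf("start=%llu not aligned to %u-byte blocks\n",
		       start, lbs_bytes);
	if (len & (lbs_sectors - 1))
		printf("len=%llu not aligned to %u-byte blocks\n",
		       len, lbs_bytes);
	return 0;
}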
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8a311ea0d441..b4845b14740d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue) | |||
738 | dm_put(md); | 738 | dm_put(md); |
739 | } | 739 | } |
740 | 740 | ||
741 | static void free_rq_clone(struct request *clone) | ||
742 | { | ||
743 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
744 | |||
745 | blk_rq_unprep_clone(clone); | ||
746 | free_rq_tio(tio); | ||
747 | } | ||
748 | |||
741 | static void dm_unprep_request(struct request *rq) | 749 | static void dm_unprep_request(struct request *rq) |
742 | { | 750 | { |
743 | struct request *clone = rq->special; | 751 | struct request *clone = rq->special; |
744 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
745 | 752 | ||
746 | rq->special = NULL; | 753 | rq->special = NULL; |
747 | rq->cmd_flags &= ~REQ_DONTPREP; | 754 | rq->cmd_flags &= ~REQ_DONTPREP; |
748 | 755 | ||
749 | blk_rq_unprep_clone(clone); | 756 | free_rq_clone(clone); |
750 | free_rq_tio(tio); | ||
751 | } | 757 | } |
752 | 758 | ||
753 | /* | 759 | /* |
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error) | |||
825 | rq->sense_len = clone->sense_len; | 831 | rq->sense_len = clone->sense_len; |
826 | } | 832 | } |
827 | 833 | ||
828 | BUG_ON(clone->bio); | 834 | free_rq_clone(clone); |
829 | free_rq_tio(tio); | ||
830 | 835 | ||
831 | blk_end_request_all(rq, error); | 836 | blk_end_request_all(rq, error); |
832 | 837 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 5b98bea4ff9b..9dd872000cec 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -359,6 +359,7 @@ static mddev_t * mddev_find(dev_t unit) | |||
359 | else | 359 | else |
360 | new->md_minor = MINOR(unit) >> MdpMinorShift; | 360 | new->md_minor = MINOR(unit) >> MdpMinorShift; |
361 | 361 | ||
362 | mutex_init(&new->open_mutex); | ||
362 | mutex_init(&new->reconfig_mutex); | 363 | mutex_init(&new->reconfig_mutex); |
363 | INIT_LIST_HEAD(&new->disks); | 364 | INIT_LIST_HEAD(&new->disks); |
364 | INIT_LIST_HEAD(&new->all_mddevs); | 365 | INIT_LIST_HEAD(&new->all_mddevs); |
@@ -1974,17 +1975,14 @@ repeat: | |||
1974 | /* otherwise we have to go forward and ... */ | 1975 | /* otherwise we have to go forward and ... */ |
1975 | mddev->events ++; | 1976 | mddev->events ++; |
1976 | if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ | 1977 | if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ |
1977 | /* .. if the array isn't clean, insist on an odd 'events' */ | 1978 | /* .. if the array isn't clean, an 'even' event must also go |
1978 | if ((mddev->events&1)==0) { | 1979 | * to spares. */ |
1979 | mddev->events++; | 1980 | if ((mddev->events&1)==0) |
1980 | nospares = 0; | 1981 | nospares = 0; |
1981 | } | ||
1982 | } else { | 1982 | } else { |
1983 | /* otherwise insist on an even 'events' (for clean states) */ | 1983 | /* otherwise an 'odd' event must go to spares */ |
1984 | if ((mddev->events&1)) { | 1984 | if ((mddev->events&1)) |
1985 | mddev->events++; | ||
1986 | nospares = 0; | 1985 | nospares = 0; |
1987 | } | ||
1988 | } | 1986 | } |
1989 | } | 1987 | } |
1990 | 1988 | ||
@@ -3601,6 +3599,7 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len) | |||
3601 | if (max < mddev->resync_min) | 3599 | if (max < mddev->resync_min) |
3602 | return -EINVAL; | 3600 | return -EINVAL; |
3603 | if (max < mddev->resync_max && | 3601 | if (max < mddev->resync_max && |
3602 | mddev->ro == 0 && | ||
3604 | test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 3603 | test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
3605 | return -EBUSY; | 3604 | return -EBUSY; |
3606 | 3605 | ||
@@ -4304,12 +4303,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4304 | struct gendisk *disk = mddev->gendisk; | 4303 | struct gendisk *disk = mddev->gendisk; |
4305 | mdk_rdev_t *rdev; | 4304 | mdk_rdev_t *rdev; |
4306 | 4305 | ||
4306 | mutex_lock(&mddev->open_mutex); | ||
4307 | if (atomic_read(&mddev->openers) > is_open) { | 4307 | if (atomic_read(&mddev->openers) > is_open) { |
4308 | printk("md: %s still in use.\n",mdname(mddev)); | 4308 | printk("md: %s still in use.\n",mdname(mddev)); |
4309 | return -EBUSY; | 4309 | err = -EBUSY; |
4310 | } | 4310 | } else if (mddev->pers) { |
4311 | |||
4312 | if (mddev->pers) { | ||
4313 | 4311 | ||
4314 | if (mddev->sync_thread) { | 4312 | if (mddev->sync_thread) { |
4315 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 4313 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
@@ -4366,8 +4364,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4366 | if (mode == 1) | 4364 | if (mode == 1) |
4367 | set_disk_ro(disk, 1); | 4365 | set_disk_ro(disk, 1); |
4368 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 4366 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
4367 | err = 0; | ||
4369 | } | 4368 | } |
4370 | 4369 | out: | |
4370 | mutex_unlock(&mddev->open_mutex); | ||
4371 | if (err) | ||
4372 | return err; | ||
4371 | /* | 4373 | /* |
4372 | * Free resources if final stop | 4374 | * Free resources if final stop |
4373 | */ | 4375 | */ |
@@ -4433,7 +4435,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4433 | blk_integrity_unregister(disk); | 4435 | blk_integrity_unregister(disk); |
4434 | md_new_event(mddev); | 4436 | md_new_event(mddev); |
4435 | sysfs_notify_dirent(mddev->sysfs_state); | 4437 | sysfs_notify_dirent(mddev->sysfs_state); |
4436 | out: | ||
4437 | return err; | 4438 | return err; |
4438 | } | 4439 | } |
4439 | 4440 | ||
@@ -5518,12 +5519,12 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
5518 | } | 5519 | } |
5519 | BUG_ON(mddev != bdev->bd_disk->private_data); | 5520 | BUG_ON(mddev != bdev->bd_disk->private_data); |
5520 | 5521 | ||
5521 | if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) | 5522 | if ((err = mutex_lock_interruptible(&mddev->open_mutex))) |
5522 | goto out; | 5523 | goto out; |
5523 | 5524 | ||
5524 | err = 0; | 5525 | err = 0; |
5525 | atomic_inc(&mddev->openers); | 5526 | atomic_inc(&mddev->openers); |
5526 | mddev_unlock(mddev); | 5527 | mutex_unlock(&mddev->open_mutex); |
5527 | 5528 | ||
5528 | check_disk_change(bdev); | 5529 | check_disk_change(bdev); |
5529 | out: | 5530 | out: |
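The reworded comments in the events hunk above keep the convention they describe: the superblock event count's parity reflects whether the array was clean, and an update whose parity does not match the current state must also reach the spares (nospares = 0); the forced extra events++ bump is what gets dropped. A toy rendering of just that decision:

#include <stdio.h>

int main(void)
{
	unsigned long long events = 41;
	int in_sync = 0;		/* array is not clean */
	int nospares = 1;

	events++;			/* 42: even */
	if (!in_sync && (events & 1) == 0)
		nospares = 0;		/* even event on a dirty array */
	else if (in_sync && (events & 1))
		nospares = 0;		/* odd event on a clean array  */

	printf("events=%llu nospares=%d\n", events, nospares);
	return 0;
}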
diff --git a/drivers/md/md.h b/drivers/md/md.h index 78f03168baf9..f8fc188bc762 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -223,6 +223,16 @@ struct mddev_s | |||
223 | * so we don't loop trying */ | 223 | * so we don't loop trying */ |
224 | 224 | ||
225 | int in_sync; /* know to not need resync */ | 225 | int in_sync; /* know to not need resync */ |
226 | /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so | ||
227 | * that we are never stopping an array while it is open. | ||
228 | * 'reconfig_mutex' protects all other reconfiguration. | ||
229 | * These locks are separate due to conflicting interactions | ||
230 | * with bdev->bd_mutex. | ||
231 | * Lock ordering is: | ||
232 | * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk | ||
233 | * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open | ||
234 | */ | ||
235 | struct mutex open_mutex; | ||
226 | struct mutex reconfig_mutex; | 236 | struct mutex reconfig_mutex; |
227 | atomic_t active; /* general refcount */ | 237 | atomic_t active; /* general refcount */ |
228 | atomic_t openers; /* number of active opens */ | 238 | atomic_t openers; /* number of active opens */ |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2b521ee67dfa..b8a2c5dc67ba 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3785,7 +3785,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3785 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { | 3785 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { |
3786 | sector_nr = raid5_size(mddev, 0, 0) | 3786 | sector_nr = raid5_size(mddev, 0, 0) |
3787 | - conf->reshape_progress; | 3787 | - conf->reshape_progress; |
3788 | } else if (mddev->delta_disks > 0 && | 3788 | } else if (mddev->delta_disks >= 0 && |
3789 | conf->reshape_progress > 0) | 3789 | conf->reshape_progress > 0) |
3790 | sector_nr = conf->reshape_progress; | 3790 | sector_nr = conf->reshape_progress; |
3791 | sector_div(sector_nr, new_data_disks); | 3791 | sector_div(sector_nr, new_data_disks); |
@@ -4509,7 +4509,26 @@ static int run(mddev_t *mddev) | |||
4509 | (old_disks-max_degraded)); | 4509 | (old_disks-max_degraded)); |
4510 | /* here_old is the first stripe that we might need to read | 4510 | /* here_old is the first stripe that we might need to read |
4511 | * from */ | 4511 | * from */ |
4512 | if (here_new >= here_old) { | 4512 | if (mddev->delta_disks == 0) { |
4513 | /* We cannot be sure it is safe to start an in-place | ||
4514 | * reshape. It is only safe if user-space if monitoring | ||
4515 | * and taking constant backups. | ||
4516 | * mdadm always starts a situation like this in | ||
4517 | * readonly mode so it can take control before | ||
4518 | * allowing any writes. So just check for that. | ||
4519 | */ | ||
4520 | if ((here_new * mddev->new_chunk_sectors != | ||
4521 | here_old * mddev->chunk_sectors) || | ||
4522 | mddev->ro == 0) { | ||
4523 | printk(KERN_ERR "raid5: in-place reshape must be started" | ||
4524 | " in read-only mode - aborting\n"); | ||
4525 | return -EINVAL; | ||
4526 | } | ||
4527 | } else if (mddev->delta_disks < 0 | ||
4528 | ? (here_new * mddev->new_chunk_sectors <= | ||
4529 | here_old * mddev->chunk_sectors) | ||
4530 | : (here_new * mddev->new_chunk_sectors >= | ||
4531 | here_old * mddev->chunk_sectors)) { | ||
4513 | /* Reading from the same stripe as writing to - bad */ | 4532 | /* Reading from the same stripe as writing to - bad */ |
4514 | printk(KERN_ERR "raid5: reshape_position too early for " | 4533 | printk(KERN_ERR "raid5: reshape_position too early for " |
4515 | "auto-recovery - aborting.\n"); | 4534 | "auto-recovery - aborting.\n"); |
@@ -5078,8 +5097,15 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5078 | mddev->degraded--; | 5097 | mddev->degraded--; |
5079 | for (d = conf->raid_disks ; | 5098 | for (d = conf->raid_disks ; |
5080 | d < conf->raid_disks - mddev->delta_disks; | 5099 | d < conf->raid_disks - mddev->delta_disks; |
5081 | d++) | 5100 | d++) { |
5082 | raid5_remove_disk(mddev, d); | 5101 | mdk_rdev_t *rdev = conf->disks[d].rdev; |
5102 | if (rdev && raid5_remove_disk(mddev, d) == 0) { | ||
5103 | char nm[20]; | ||
5104 | sprintf(nm, "rd%d", rdev->raid_disk); | ||
5105 | sysfs_remove_link(&mddev->kobj, nm); | ||
5106 | rdev->raid_disk = -1; | ||
5107 | } | ||
5108 | } | ||
5083 | } | 5109 | } |
5084 | mddev->layout = conf->algorithm; | 5110 | mddev->layout = conf->algorithm; |
5085 | mddev->chunk_sectors = conf->chunk_sectors; | 5111 | mddev->chunk_sectors = conf->chunk_sectors; |
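Two separate tightenings in the raid5 hunks above: an in-place reshape (delta_disks == 0) is refused unless the array is read-only and the old and new stripe positions coincide, and the overlap test now flips its comparison with the sign of delta_disks (a shrinking reshape proceeds from the other end of the array). A toy rendering of the overlap test with made-up numbers:

#include <stdio.h>

int main(void)
{
	long long here_new = 100, here_old = 120;	/* stripe indices */
	int new_chunk_sectors = 128, chunk_sectors = 128;
	int delta_disks = -1;				/* shrinking */

	int unsafe = delta_disks < 0
		? (here_new * new_chunk_sectors <= here_old * chunk_sectors)
		: (here_new * new_chunk_sectors >= here_old * chunk_sectors);

	printf("reshape_position %s\n", unsafe ? "too early" : "ok");
	return 0;
}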
diff --git a/drivers/media/common/tuners/qt1010.c b/drivers/media/common/tuners/qt1010.c index 825aa1412e6f..9f5dba244cb8 100644 --- a/drivers/media/common/tuners/qt1010.c +++ b/drivers/media/common/tuners/qt1010.c | |||
@@ -64,24 +64,22 @@ static int qt1010_writereg(struct qt1010_priv *priv, u8 reg, u8 val) | |||
64 | /* dump all registers */ | 64 | /* dump all registers */ |
65 | static void qt1010_dump_regs(struct qt1010_priv *priv) | 65 | static void qt1010_dump_regs(struct qt1010_priv *priv) |
66 | { | 66 | { |
67 | char buf[52], buf2[4]; | ||
68 | u8 reg, val; | 67 | u8 reg, val; |
69 | 68 | ||
70 | for (reg = 0; ; reg++) { | 69 | for (reg = 0; ; reg++) { |
71 | if (reg % 16 == 0) { | 70 | if (reg % 16 == 0) { |
72 | if (reg) | 71 | if (reg) |
73 | printk("%s\n", buf); | 72 | printk(KERN_CONT "\n"); |
74 | sprintf(buf, "%02x: ", reg); | 73 | printk(KERN_DEBUG "%02x:", reg); |
75 | } | 74 | } |
76 | if (qt1010_readreg(priv, reg, &val) == 0) | 75 | if (qt1010_readreg(priv, reg, &val) == 0) |
77 | sprintf(buf2, "%02x ", val); | 76 | printk(KERN_CONT " %02x", val); |
78 | else | 77 | else |
79 | strcpy(buf2, "-- "); | 78 | printk(KERN_CONT " --"); |
80 | strcat(buf, buf2); | ||
81 | if (reg == 0x2f) | 79 | if (reg == 0x2f) |
82 | break; | 80 | break; |
83 | } | 81 | } |
84 | printk("%s\n", buf); | 82 | printk(KERN_CONT "\n"); |
85 | } | 83 | } |
86 | 84 | ||
87 | static int qt1010_set_params(struct dvb_frontend *fe, | 85 | static int qt1010_set_params(struct dvb_frontend *fe, |
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c index aa20ce8cc668..f270e605da83 100644 --- a/drivers/media/common/tuners/tuner-xc2028.c +++ b/drivers/media/common/tuners/tuner-xc2028.c | |||
@@ -1119,8 +1119,8 @@ static int xc2028_sleep(struct dvb_frontend *fe) | |||
1119 | struct xc2028_data *priv = fe->tuner_priv; | 1119 | struct xc2028_data *priv = fe->tuner_priv; |
1120 | int rc = 0; | 1120 | int rc = 0; |
1121 | 1121 | ||
1122 | /* Avoid firmware reload on slow devices */ | 1122 | /* Avoid firmware reload on slow devices or if PM disabled */ |
1123 | if (no_poweroff) | 1123 | if (no_poweroff || priv->ctrl.disable_power_mgmt) |
1124 | return 0; | 1124 | return 0; |
1125 | 1125 | ||
1126 | tuner_dbg("Putting xc2028/3028 into poweroff mode.\n"); | 1126 | tuner_dbg("Putting xc2028/3028 into poweroff mode.\n"); |
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h index 19de7928a74e..a90c35d50add 100644 --- a/drivers/media/common/tuners/tuner-xc2028.h +++ b/drivers/media/common/tuners/tuner-xc2028.h | |||
@@ -38,6 +38,7 @@ struct xc2028_ctrl { | |||
38 | unsigned int input1:1; | 38 | unsigned int input1:1; |
39 | unsigned int vhfbw7:1; | 39 | unsigned int vhfbw7:1; |
40 | unsigned int uhfbw8:1; | 40 | unsigned int uhfbw8:1; |
41 | unsigned int disable_power_mgmt:1; | ||
41 | unsigned int demod; | 42 | unsigned int demod; |
42 | enum firmware_type type:2; | 43 | enum firmware_type type:2; |
43 | }; | 44 | }; |
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c index 4cb31e7c13c2..26690dfb3260 100644 --- a/drivers/media/dvb/dvb-usb/af9015.c +++ b/drivers/media/dvb/dvb-usb/af9015.c | |||
@@ -81,7 +81,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req) | |||
81 | 81 | ||
82 | switch (req->cmd) { | 82 | switch (req->cmd) { |
83 | case GET_CONFIG: | 83 | case GET_CONFIG: |
84 | case BOOT: | ||
85 | case READ_MEMORY: | 84 | case READ_MEMORY: |
86 | case RECONNECT_USB: | 85 | case RECONNECT_USB: |
87 | case GET_IR_CODE: | 86 | case GET_IR_CODE: |
@@ -100,6 +99,7 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req) | |||
100 | case WRITE_VIRTUAL_MEMORY: | 99 | case WRITE_VIRTUAL_MEMORY: |
101 | case COPY_FIRMWARE: | 100 | case COPY_FIRMWARE: |
102 | case DOWNLOAD_FIRMWARE: | 101 | case DOWNLOAD_FIRMWARE: |
102 | case BOOT: | ||
103 | break; | 103 | break; |
104 | default: | 104 | default: |
105 | err("unknown command:%d", req->cmd); | 105 | err("unknown command:%d", req->cmd); |
diff --git a/drivers/media/dvb/frontends/cx22700.c b/drivers/media/dvb/frontends/cx22700.c index ace5cb17165d..fbd838eca268 100644 --- a/drivers/media/dvb/frontends/cx22700.c +++ b/drivers/media/dvb/frontends/cx22700.c | |||
@@ -380,7 +380,7 @@ struct dvb_frontend* cx22700_attach(const struct cx22700_config* config, | |||
380 | struct cx22700_state* state = NULL; | 380 | struct cx22700_state* state = NULL; |
381 | 381 | ||
382 | /* allocate memory for the internal state */ | 382 | /* allocate memory for the internal state */ |
383 | state = kmalloc(sizeof(struct cx22700_state), GFP_KERNEL); | 383 | state = kzalloc(sizeof(struct cx22700_state), GFP_KERNEL); |
384 | if (state == NULL) goto error; | 384 | if (state == NULL) goto error; |
385 | 385 | ||
386 | /* setup the state */ | 386 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c index 5d1abe34bddb..00b5c7e91d5d 100644 --- a/drivers/media/dvb/frontends/cx22702.c +++ b/drivers/media/dvb/frontends/cx22702.c | |||
@@ -580,7 +580,7 @@ struct dvb_frontend *cx22702_attach(const struct cx22702_config *config, | |||
580 | struct cx22702_state *state = NULL; | 580 | struct cx22702_state *state = NULL; |
581 | 581 | ||
582 | /* allocate memory for the internal state */ | 582 | /* allocate memory for the internal state */ |
583 | state = kmalloc(sizeof(struct cx22702_state), GFP_KERNEL); | 583 | state = kzalloc(sizeof(struct cx22702_state), GFP_KERNEL); |
584 | if (state == NULL) | 584 | if (state == NULL) |
585 | goto error; | 585 | goto error; |
586 | 586 | ||
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c index 87ae29db024f..ffbcfabd83f0 100644 --- a/drivers/media/dvb/frontends/cx24110.c +++ b/drivers/media/dvb/frontends/cx24110.c | |||
@@ -598,7 +598,7 @@ struct dvb_frontend* cx24110_attach(const struct cx24110_config* config, | |||
598 | int ret; | 598 | int ret; |
599 | 599 | ||
600 | /* allocate memory for the internal state */ | 600 | /* allocate memory for the internal state */ |
601 | state = kmalloc(sizeof(struct cx24110_state), GFP_KERNEL); | 601 | state = kzalloc(sizeof(struct cx24110_state), GFP_KERNEL); |
602 | if (state == NULL) goto error; | 602 | if (state == NULL) goto error; |
603 | 603 | ||
604 | /* setup the state */ | 604 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/dvb_dummy_fe.c b/drivers/media/dvb/frontends/dvb_dummy_fe.c index db8a937cc630..a7fc7e53a551 100644 --- a/drivers/media/dvb/frontends/dvb_dummy_fe.c +++ b/drivers/media/dvb/frontends/dvb_dummy_fe.c | |||
@@ -117,7 +117,7 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void) | |||
117 | struct dvb_dummy_fe_state* state = NULL; | 117 | struct dvb_dummy_fe_state* state = NULL; |
118 | 118 | ||
119 | /* allocate memory for the internal state */ | 119 | /* allocate memory for the internal state */ |
120 | state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); | 120 | state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); |
121 | if (state == NULL) goto error; | 121 | if (state == NULL) goto error; |
122 | 122 | ||
123 | /* create dvb_frontend */ | 123 | /* create dvb_frontend */ |
@@ -137,7 +137,7 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void) | |||
137 | struct dvb_dummy_fe_state* state = NULL; | 137 | struct dvb_dummy_fe_state* state = NULL; |
138 | 138 | ||
139 | /* allocate memory for the internal state */ | 139 | /* allocate memory for the internal state */ |
140 | state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); | 140 | state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); |
141 | if (state == NULL) goto error; | 141 | if (state == NULL) goto error; |
142 | 142 | ||
143 | /* create dvb_frontend */ | 143 | /* create dvb_frontend */ |
@@ -157,7 +157,7 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void) | |||
157 | struct dvb_dummy_fe_state* state = NULL; | 157 | struct dvb_dummy_fe_state* state = NULL; |
158 | 158 | ||
159 | /* allocate memory for the internal state */ | 159 | /* allocate memory for the internal state */ |
160 | state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); | 160 | state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); |
161 | if (state == NULL) goto error; | 161 | if (state == NULL) goto error; |
162 | 162 | ||
163 | /* create dvb_frontend */ | 163 | /* create dvb_frontend */ |
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c index e1e70e9e0cb9..3051b64aa17c 100644 --- a/drivers/media/dvb/frontends/l64781.c +++ b/drivers/media/dvb/frontends/l64781.c | |||
@@ -501,7 +501,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config, | |||
501 | { .addr = config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; | 501 | { .addr = config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; |
502 | 502 | ||
503 | /* allocate memory for the internal state */ | 503 | /* allocate memory for the internal state */ |
504 | state = kmalloc(sizeof(struct l64781_state), GFP_KERNEL); | 504 | state = kzalloc(sizeof(struct l64781_state), GFP_KERNEL); |
505 | if (state == NULL) goto error; | 505 | if (state == NULL) goto error; |
506 | 506 | ||
507 | /* setup the state */ | 507 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/lgs8gl5.c b/drivers/media/dvb/frontends/lgs8gl5.c index 855852fddf22..bb37ed289a05 100644 --- a/drivers/media/dvb/frontends/lgs8gl5.c +++ b/drivers/media/dvb/frontends/lgs8gl5.c | |||
@@ -387,7 +387,7 @@ lgs8gl5_attach(const struct lgs8gl5_config *config, struct i2c_adapter *i2c) | |||
387 | dprintk("%s\n", __func__); | 387 | dprintk("%s\n", __func__); |
388 | 388 | ||
389 | /* Allocate memory for the internal state */ | 389 | /* Allocate memory for the internal state */ |
390 | state = kmalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL); | 390 | state = kzalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL); |
391 | if (state == NULL) | 391 | if (state == NULL) |
392 | goto error; | 392 | goto error; |
393 | 393 | ||
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c index a621f727935f..f69daaac78c9 100644 --- a/drivers/media/dvb/frontends/mt312.c +++ b/drivers/media/dvb/frontends/mt312.c | |||
@@ -782,7 +782,7 @@ struct dvb_frontend *mt312_attach(const struct mt312_config *config, | |||
782 | struct mt312_state *state = NULL; | 782 | struct mt312_state *state = NULL; |
783 | 783 | ||
784 | /* allocate memory for the internal state */ | 784 | /* allocate memory for the internal state */ |
785 | state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL); | 785 | state = kzalloc(sizeof(struct mt312_state), GFP_KERNEL); |
786 | if (state == NULL) | 786 | if (state == NULL) |
787 | goto error; | 787 | goto error; |
788 | 788 | ||
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c index 0eef22dbf8a0..a763ec756f7f 100644 --- a/drivers/media/dvb/frontends/nxt6000.c +++ b/drivers/media/dvb/frontends/nxt6000.c | |||
@@ -545,7 +545,7 @@ struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config, | |||
545 | struct nxt6000_state* state = NULL; | 545 | struct nxt6000_state* state = NULL; |
546 | 546 | ||
547 | /* allocate memory for the internal state */ | 547 | /* allocate memory for the internal state */ |
548 | state = kmalloc(sizeof(struct nxt6000_state), GFP_KERNEL); | 548 | state = kzalloc(sizeof(struct nxt6000_state), GFP_KERNEL); |
549 | if (state == NULL) goto error; | 549 | if (state == NULL) goto error; |
550 | 550 | ||
551 | /* setup the state */ | 551 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c index 8133ea3cddd7..38e67accb8c3 100644 --- a/drivers/media/dvb/frontends/or51132.c +++ b/drivers/media/dvb/frontends/or51132.c | |||
@@ -562,7 +562,7 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config, | |||
562 | struct or51132_state* state = NULL; | 562 | struct or51132_state* state = NULL; |
563 | 563 | ||
564 | /* Allocate memory for the internal state */ | 564 | /* Allocate memory for the internal state */ |
565 | state = kmalloc(sizeof(struct or51132_state), GFP_KERNEL); | 565 | state = kzalloc(sizeof(struct or51132_state), GFP_KERNEL); |
566 | if (state == NULL) | 566 | if (state == NULL) |
567 | return NULL; | 567 | return NULL; |
568 | 568 | ||
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c index 16cf2fdd5d7d..c709ce6771c8 100644 --- a/drivers/media/dvb/frontends/or51211.c +++ b/drivers/media/dvb/frontends/or51211.c | |||
@@ -527,7 +527,7 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config, | |||
527 | struct or51211_state* state = NULL; | 527 | struct or51211_state* state = NULL; |
528 | 528 | ||
529 | /* Allocate memory for the internal state */ | 529 | /* Allocate memory for the internal state */ |
530 | state = kmalloc(sizeof(struct or51211_state), GFP_KERNEL); | 530 | state = kzalloc(sizeof(struct or51211_state), GFP_KERNEL); |
531 | if (state == NULL) | 531 | if (state == NULL) |
532 | return NULL; | 532 | return NULL; |
533 | 533 | ||
diff --git a/drivers/media/dvb/frontends/s5h1409.c b/drivers/media/dvb/frontends/s5h1409.c index 3e08d985d6e5..fb3011518427 100644 --- a/drivers/media/dvb/frontends/s5h1409.c +++ b/drivers/media/dvb/frontends/s5h1409.c | |||
@@ -796,7 +796,7 @@ struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config, | |||
796 | u16 reg; | 796 | u16 reg; |
797 | 797 | ||
798 | /* allocate memory for the internal state */ | 798 | /* allocate memory for the internal state */ |
799 | state = kmalloc(sizeof(struct s5h1409_state), GFP_KERNEL); | 799 | state = kzalloc(sizeof(struct s5h1409_state), GFP_KERNEL); |
800 | if (state == NULL) | 800 | if (state == NULL) |
801 | goto error; | 801 | goto error; |
802 | 802 | ||
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c index 66e2dd6d6fe4..d8adf1e32019 100644 --- a/drivers/media/dvb/frontends/s5h1411.c +++ b/drivers/media/dvb/frontends/s5h1411.c | |||
@@ -844,7 +844,7 @@ struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config, | |||
844 | u16 reg; | 844 | u16 reg; |
845 | 845 | ||
846 | /* allocate memory for the internal state */ | 846 | /* allocate memory for the internal state */ |
847 | state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL); | 847 | state = kzalloc(sizeof(struct s5h1411_state), GFP_KERNEL); |
848 | if (state == NULL) | 848 | if (state == NULL) |
849 | goto error; | 849 | goto error; |
850 | 850 | ||
diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c index 0bd16af8a6cd..9552a22ccffb 100644 --- a/drivers/media/dvb/frontends/si21xx.c +++ b/drivers/media/dvb/frontends/si21xx.c | |||
@@ -928,7 +928,7 @@ struct dvb_frontend *si21xx_attach(const struct si21xx_config *config, | |||
928 | dprintk("%s\n", __func__); | 928 | dprintk("%s\n", __func__); |
929 | 929 | ||
930 | /* allocate memory for the internal state */ | 930 | /* allocate memory for the internal state */ |
931 | state = kmalloc(sizeof(struct si21xx_state), GFP_KERNEL); | 931 | state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL); |
932 | if (state == NULL) | 932 | if (state == NULL) |
933 | goto error; | 933 | goto error; |
934 | 934 | ||
diff --git a/drivers/media/dvb/frontends/sp8870.c b/drivers/media/dvb/frontends/sp8870.c index 1c9a9b4051b9..b85eb60a893e 100644 --- a/drivers/media/dvb/frontends/sp8870.c +++ b/drivers/media/dvb/frontends/sp8870.c | |||
@@ -557,7 +557,7 @@ struct dvb_frontend* sp8870_attach(const struct sp8870_config* config, | |||
557 | struct sp8870_state* state = NULL; | 557 | struct sp8870_state* state = NULL; |
558 | 558 | ||
559 | /* allocate memory for the internal state */ | 559 | /* allocate memory for the internal state */ |
560 | state = kmalloc(sizeof(struct sp8870_state), GFP_KERNEL); | 560 | state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL); |
561 | if (state == NULL) goto error; | 561 | if (state == NULL) goto error; |
562 | 562 | ||
563 | /* setup the state */ | 563 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/sp887x.c b/drivers/media/dvb/frontends/sp887x.c index 559509ab4dab..4a7c3d842608 100644 --- a/drivers/media/dvb/frontends/sp887x.c +++ b/drivers/media/dvb/frontends/sp887x.c | |||
@@ -557,7 +557,7 @@ struct dvb_frontend* sp887x_attach(const struct sp887x_config* config, | |||
557 | struct sp887x_state* state = NULL; | 557 | struct sp887x_state* state = NULL; |
558 | 558 | ||
559 | /* allocate memory for the internal state */ | 559 | /* allocate memory for the internal state */ |
560 | state = kmalloc(sizeof(struct sp887x_state), GFP_KERNEL); | 560 | state = kzalloc(sizeof(struct sp887x_state), GFP_KERNEL); |
561 | if (state == NULL) goto error; | 561 | if (state == NULL) goto error; |
562 | 562 | ||
563 | /* setup the state */ | 563 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c index ff1194de34c0..2930a5d6768a 100644 --- a/drivers/media/dvb/frontends/stv0288.c +++ b/drivers/media/dvb/frontends/stv0288.c | |||
@@ -570,7 +570,7 @@ struct dvb_frontend *stv0288_attach(const struct stv0288_config *config, | |||
570 | int id; | 570 | int id; |
571 | 571 | ||
572 | /* allocate memory for the internal state */ | 572 | /* allocate memory for the internal state */ |
573 | state = kmalloc(sizeof(struct stv0288_state), GFP_KERNEL); | 573 | state = kzalloc(sizeof(struct stv0288_state), GFP_KERNEL); |
574 | if (state == NULL) | 574 | if (state == NULL) |
575 | goto error; | 575 | goto error; |
576 | 576 | ||
diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c index 62caf802ed99..4fd7479bb62b 100644 --- a/drivers/media/dvb/frontends/stv0297.c +++ b/drivers/media/dvb/frontends/stv0297.c | |||
@@ -663,7 +663,7 @@ struct dvb_frontend *stv0297_attach(const struct stv0297_config *config, | |||
663 | struct stv0297_state *state = NULL; | 663 | struct stv0297_state *state = NULL; |
664 | 664 | ||
665 | /* allocate memory for the internal state */ | 665 | /* allocate memory for the internal state */ |
666 | state = kmalloc(sizeof(struct stv0297_state), GFP_KERNEL); | 666 | state = kzalloc(sizeof(struct stv0297_state), GFP_KERNEL); |
667 | if (state == NULL) | 667 | if (state == NULL) |
668 | goto error; | 668 | goto error; |
669 | 669 | ||
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c index 6c1cb1973c6e..968874469726 100644 --- a/drivers/media/dvb/frontends/stv0299.c +++ b/drivers/media/dvb/frontends/stv0299.c | |||
@@ -667,7 +667,7 @@ struct dvb_frontend* stv0299_attach(const struct stv0299_config* config, | |||
667 | int id; | 667 | int id; |
668 | 668 | ||
669 | /* allocate memory for the internal state */ | 669 | /* allocate memory for the internal state */ |
670 | state = kmalloc(sizeof(struct stv0299_state), GFP_KERNEL); | 670 | state = kzalloc(sizeof(struct stv0299_state), GFP_KERNEL); |
671 | if (state == NULL) goto error; | 671 | if (state == NULL) goto error; |
672 | 672 | ||
673 | /* setup the state */ | 673 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c index f648fdb64bb7..f5d7b3277a2f 100644 --- a/drivers/media/dvb/frontends/tda10021.c +++ b/drivers/media/dvb/frontends/tda10021.c | |||
@@ -413,7 +413,7 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config, | |||
413 | u8 id; | 413 | u8 id; |
414 | 414 | ||
415 | /* allocate memory for the internal state */ | 415 | /* allocate memory for the internal state */ |
416 | state = kmalloc(sizeof(struct tda10021_state), GFP_KERNEL); | 416 | state = kzalloc(sizeof(struct tda10021_state), GFP_KERNEL); |
417 | if (state == NULL) goto error; | 417 | if (state == NULL) goto error; |
418 | 418 | ||
419 | /* setup the state */ | 419 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c index cc8862ce4aae..4e2a7c8b2f62 100644 --- a/drivers/media/dvb/frontends/tda10048.c +++ b/drivers/media/dvb/frontends/tda10048.c | |||
@@ -1095,7 +1095,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config, | |||
1095 | dprintk(1, "%s()\n", __func__); | 1095 | dprintk(1, "%s()\n", __func__); |
1096 | 1096 | ||
1097 | /* allocate memory for the internal state */ | 1097 | /* allocate memory for the internal state */ |
1098 | state = kmalloc(sizeof(struct tda10048_state), GFP_KERNEL); | 1098 | state = kzalloc(sizeof(struct tda10048_state), GFP_KERNEL); |
1099 | if (state == NULL) | 1099 | if (state == NULL) |
1100 | goto error; | 1100 | goto error; |
1101 | 1101 | ||
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c index 4981cef8b444..f2a8abe0a243 100644 --- a/drivers/media/dvb/frontends/tda1004x.c +++ b/drivers/media/dvb/frontends/tda1004x.c | |||
@@ -1269,7 +1269,7 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config, | |||
1269 | int id; | 1269 | int id; |
1270 | 1270 | ||
1271 | /* allocate memory for the internal state */ | 1271 | /* allocate memory for the internal state */ |
1272 | state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL); | 1272 | state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL); |
1273 | if (!state) { | 1273 | if (!state) { |
1274 | printk(KERN_ERR "Can't alocate memory for tda10045 state\n"); | 1274 | printk(KERN_ERR "Can't alocate memory for tda10045 state\n"); |
1275 | return NULL; | 1275 | return NULL; |
@@ -1339,7 +1339,7 @@ struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config, | |||
1339 | int id; | 1339 | int id; |
1340 | 1340 | ||
1341 | /* allocate memory for the internal state */ | 1341 | /* allocate memory for the internal state */ |
1342 | state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL); | 1342 | state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL); |
1343 | if (!state) { | 1343 | if (!state) { |
1344 | printk(KERN_ERR "Can't alocate memory for tda10046 state\n"); | 1344 | printk(KERN_ERR "Can't alocate memory for tda10046 state\n"); |
1345 | return NULL; | 1345 | return NULL; |
diff --git a/drivers/media/dvb/frontends/tda10086.c b/drivers/media/dvb/frontends/tda10086.c index a17ce3c4ad86..f2c8faac6f36 100644 --- a/drivers/media/dvb/frontends/tda10086.c +++ b/drivers/media/dvb/frontends/tda10086.c | |||
@@ -745,7 +745,7 @@ struct dvb_frontend* tda10086_attach(const struct tda10086_config* config, | |||
745 | dprintk ("%s\n", __func__); | 745 | dprintk ("%s\n", __func__); |
746 | 746 | ||
747 | /* allocate memory for the internal state */ | 747 | /* allocate memory for the internal state */ |
748 | state = kmalloc(sizeof(struct tda10086_state), GFP_KERNEL); | 748 | state = kzalloc(sizeof(struct tda10086_state), GFP_KERNEL); |
749 | if (!state) | 749 | if (!state) |
750 | return NULL; | 750 | return NULL; |
751 | 751 | ||
diff --git a/drivers/media/dvb/frontends/tda8083.c b/drivers/media/dvb/frontends/tda8083.c index 5b843b2e67e8..9369f7442f27 100644 --- a/drivers/media/dvb/frontends/tda8083.c +++ b/drivers/media/dvb/frontends/tda8083.c | |||
@@ -417,7 +417,7 @@ struct dvb_frontend* tda8083_attach(const struct tda8083_config* config, | |||
417 | struct tda8083_state* state = NULL; | 417 | struct tda8083_state* state = NULL; |
418 | 418 | ||
419 | /* allocate memory for the internal state */ | 419 | /* allocate memory for the internal state */ |
420 | state = kmalloc(sizeof(struct tda8083_state), GFP_KERNEL); | 420 | state = kzalloc(sizeof(struct tda8083_state), GFP_KERNEL); |
421 | if (state == NULL) goto error; | 421 | if (state == NULL) goto error; |
422 | 422 | ||
423 | /* setup the state */ | 423 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/ves1820.c b/drivers/media/dvb/frontends/ves1820.c index a184597f1d9b..6e78e4865515 100644 --- a/drivers/media/dvb/frontends/ves1820.c +++ b/drivers/media/dvb/frontends/ves1820.c | |||
@@ -374,7 +374,7 @@ struct dvb_frontend* ves1820_attach(const struct ves1820_config* config, | |||
374 | struct ves1820_state* state = NULL; | 374 | struct ves1820_state* state = NULL; |
375 | 375 | ||
376 | /* allocate memory for the internal state */ | 376 | /* allocate memory for the internal state */ |
377 | state = kmalloc(sizeof(struct ves1820_state), GFP_KERNEL); | 377 | state = kzalloc(sizeof(struct ves1820_state), GFP_KERNEL); |
378 | if (state == NULL) | 378 | if (state == NULL) |
379 | goto error; | 379 | goto error; |
380 | 380 | ||
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c index bd558960bd87..8d7854c2fb0c 100644 --- a/drivers/media/dvb/frontends/ves1x93.c +++ b/drivers/media/dvb/frontends/ves1x93.c | |||
@@ -456,7 +456,7 @@ struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config, | |||
456 | u8 identity; | 456 | u8 identity; |
457 | 457 | ||
458 | /* allocate memory for the internal state */ | 458 | /* allocate memory for the internal state */ |
459 | state = kmalloc(sizeof(struct ves1x93_state), GFP_KERNEL); | 459 | state = kzalloc(sizeof(struct ves1x93_state), GFP_KERNEL); |
460 | if (state == NULL) goto error; | 460 | if (state == NULL) goto error; |
461 | 461 | ||
462 | /* setup the state */ | 462 | /* setup the state */ |
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c index 148b6f7f6cb2..66f5c1fb3074 100644 --- a/drivers/media/dvb/frontends/zl10353.c +++ b/drivers/media/dvb/frontends/zl10353.c | |||
@@ -98,7 +98,6 @@ static int zl10353_read_register(struct zl10353_state *state, u8 reg) | |||
98 | static void zl10353_dump_regs(struct dvb_frontend *fe) | 98 | static void zl10353_dump_regs(struct dvb_frontend *fe) |
99 | { | 99 | { |
100 | struct zl10353_state *state = fe->demodulator_priv; | 100 | struct zl10353_state *state = fe->demodulator_priv; |
101 | char buf[52], buf2[4]; | ||
102 | int ret; | 101 | int ret; |
103 | u8 reg; | 102 | u8 reg; |
104 | 103 | ||
@@ -106,19 +105,18 @@ static void zl10353_dump_regs(struct dvb_frontend *fe) | |||
106 | for (reg = 0; ; reg++) { | 105 | for (reg = 0; ; reg++) { |
107 | if (reg % 16 == 0) { | 106 | if (reg % 16 == 0) { |
108 | if (reg) | 107 | if (reg) |
109 | printk(KERN_DEBUG "%s\n", buf); | 108 | printk(KERN_CONT "\n"); |
110 | sprintf(buf, "%02x: ", reg); | 109 | printk(KERN_DEBUG "%02x:", reg); |
111 | } | 110 | } |
112 | ret = zl10353_read_register(state, reg); | 111 | ret = zl10353_read_register(state, reg); |
113 | if (ret >= 0) | 112 | if (ret >= 0) |
114 | sprintf(buf2, "%02x ", (u8)ret); | 113 | printk(KERN_CONT " %02x", (u8)ret); |
115 | else | 114 | else |
116 | strcpy(buf2, "-- "); | 115 | printk(KERN_CONT " --"); |
117 | strcat(buf, buf2); | ||
118 | if (reg == 0xff) | 116 | if (reg == 0xff) |
119 | break; | 117 | break; |
120 | } | 118 | } |
121 | printk(KERN_DEBUG "%s\n", buf); | 119 | printk(KERN_CONT "\n"); |
122 | } | 120 | } |
123 | 121 | ||
124 | static void zl10353_calc_nominal_rate(struct dvb_frontend *fe, | 122 | static void zl10353_calc_nominal_rate(struct dvb_frontend *fe, |
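The zl10353_dump_regs() rewrite above drops the 52-byte stack line buffer and the sprintf()/strcat() assembly, printing each register as it is read and continuing the current console line with KERN_CONT. The same idiom applied to a plain byte buffer, as a self-contained sketch (names are illustrative):

        #include <linux/kernel.h>
        #include <linux/types.h>

        /* Print 'len' bytes, 16 per row, without any intermediate line buffer. */
        static void hexdump_rows(const u8 *buf, size_t len)
        {
                size_t i;

                for (i = 0; i < len; i++) {
                        if (i % 16 == 0) {
                                if (i)
                                        printk(KERN_CONT "\n");  /* close previous row */
                                printk(KERN_DEBUG "%02zx:", i);  /* open a new row */
                        }
                        printk(KERN_CONT " %02x", buf[i]);
                }
                printk(KERN_CONT "\n");
        }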
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig index dd863f261672..8c1aed77ea30 100644 --- a/drivers/media/dvb/siano/Kconfig +++ b/drivers/media/dvb/siano/Kconfig | |||
@@ -2,25 +2,33 @@ | |||
2 | # Siano Mobile Silicon Digital TV device configuration | 2 | # Siano Mobile Silicon Digital TV device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | config DVB_SIANO_SMS1XXX | 5 | config SMS_SIANO_MDTV |
6 | tristate "Siano SMS1XXX USB dongle support" | 6 | tristate "Siano SMS1xxx based MDTV receiver" |
7 | depends on DVB_CORE && USB | 7 | depends on DVB_CORE && INPUT |
8 | ---help--- | 8 | ---help--- |
9 | Choose Y here if you have a USB dongle with a SMS1XXX chipset. | 9 | Choose Y or M here if you have an MDTV receiver with a Siano chipset. |
10 | 10 | ||
11 | To compile this driver as a module, choose M here: the | 11 | To compile this driver as a module, choose M here |
12 | module will be called sms1xxx. | 12 | (The module will be called smsmdtv). |
13 | 13 | ||
14 | config DVB_SIANO_SMS1XXX_SMS_IDS | 14 | Further documentation on this driver can be found on the WWW |
15 | bool "Enable support for Siano Mobile Silicon default USB IDs" | 15 | at http://www.siano-ms.com/ |
16 | depends on DVB_SIANO_SMS1XXX | 16 | |
17 | default y | 17 | if SMS_SIANO_MDTV |
18 | ---help--- | 18 | menu "Siano module components" |
19 | Choose Y here if you have a USB dongle with a SMS1XXX chipset | ||
20 | that uses Siano Mobile Silicon's default usb vid:pid. | ||
21 | 19 | ||
22 | Choose N here if you would prefer to use Siano's external driver. | 20 | # Hardware interfaces support |
23 | 21 | ||
24 | Further documentation on this driver can be found on the WWW at | 22 | config SMS_USB_DRV |
25 | <http://www.siano-ms.com/>. | 23 | tristate "USB interface support" |
24 | depends on DVB_CORE && USB | ||
25 | ---help--- | ||
26 | Choose if you would like to have Siano's support for USB interface | ||
26 | 27 | ||
28 | config SMS_SDIO_DRV | ||
29 | tristate "SDIO interface support" | ||
30 | depends on DVB_CORE && MMC | ||
31 | ---help--- | ||
32 | Choose if you would like to have Siano's support for SDIO interface | ||
33 | endmenu | ||
34 | endif # SMS_SIANO_MDTV | ||
diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile index c6644d909433..c54140b5ab5a 100644 --- a/drivers/media/dvb/siano/Makefile +++ b/drivers/media/dvb/siano/Makefile | |||
@@ -1,8 +1,9 @@ | |||
1 | sms1xxx-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o | ||
2 | 1 | ||
3 | obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o | 2 | smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o |
4 | obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o | 3 | |
5 | obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsdvb.o | 4 | obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o |
5 | obj-$(CONFIG_SMS_USB_DRV) += smsusb.o | ||
6 | obj-$(CONFIG_SMS_SDIO_DRV) += smssdio.o | ||
6 | 7 | ||
7 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core | 8 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core |
8 | 9 | ||
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c index d8b15d583bde..0420e2885e75 100644 --- a/drivers/media/dvb/siano/sms-cards.c +++ b/drivers/media/dvb/siano/sms-cards.c | |||
@@ -116,99 +116,21 @@ static inline void sms_gpio_assign_11xx_default_led_config( | |||
116 | 116 | ||
117 | int sms_board_event(struct smscore_device_t *coredev, | 117 | int sms_board_event(struct smscore_device_t *coredev, |
118 | enum SMS_BOARD_EVENTS gevent) { | 118 | enum SMS_BOARD_EVENTS gevent) { |
119 | int board_id = smscore_get_board_id(coredev); | ||
120 | struct sms_board *board = sms_get_board(board_id); | ||
121 | struct smscore_gpio_config MyGpioConfig; | 119 | struct smscore_gpio_config MyGpioConfig; |
122 | 120 | ||
123 | sms_gpio_assign_11xx_default_led_config(&MyGpioConfig); | 121 | sms_gpio_assign_11xx_default_led_config(&MyGpioConfig); |
124 | 122 | ||
125 | switch (gevent) { | 123 | switch (gevent) { |
126 | case BOARD_EVENT_POWER_INIT: /* including hotplug */ | 124 | case BOARD_EVENT_POWER_INIT: /* including hotplug */ |
127 | switch (board_id) { | ||
128 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
129 | /* set I/O and turn off all LEDs */ | ||
130 | smscore_gpio_configure(coredev, | ||
131 | board->board_cfg.leds_power, | ||
132 | &MyGpioConfig); | ||
133 | smscore_gpio_set_level(coredev, | ||
134 | board->board_cfg.leds_power, 0); | ||
135 | smscore_gpio_configure(coredev, board->board_cfg.led0, | ||
136 | &MyGpioConfig); | ||
137 | smscore_gpio_set_level(coredev, | ||
138 | board->board_cfg.led0, 0); | ||
139 | smscore_gpio_configure(coredev, board->board_cfg.led1, | ||
140 | &MyGpioConfig); | ||
141 | smscore_gpio_set_level(coredev, | ||
142 | board->board_cfg.led1, 0); | ||
143 | break; | ||
144 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: | ||
145 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: | ||
146 | /* set I/O and turn off LNA */ | ||
147 | smscore_gpio_configure(coredev, | ||
148 | board->board_cfg.foreign_lna0_ctrl, | ||
149 | &MyGpioConfig); | ||
150 | smscore_gpio_set_level(coredev, | ||
151 | board->board_cfg.foreign_lna0_ctrl, | ||
152 | 0); | ||
153 | break; | ||
154 | } | ||
155 | break; /* BOARD_EVENT_BIND */ | 125 | break; /* BOARD_EVENT_BIND */ |
156 | 126 | ||
157 | case BOARD_EVENT_POWER_SUSPEND: | 127 | case BOARD_EVENT_POWER_SUSPEND: |
158 | switch (board_id) { | ||
159 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
160 | smscore_gpio_set_level(coredev, | ||
161 | board->board_cfg.leds_power, 0); | ||
162 | smscore_gpio_set_level(coredev, | ||
163 | board->board_cfg.led0, 0); | ||
164 | smscore_gpio_set_level(coredev, | ||
165 | board->board_cfg.led1, 0); | ||
166 | break; | ||
167 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: | ||
168 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: | ||
169 | smscore_gpio_set_level(coredev, | ||
170 | board->board_cfg.foreign_lna0_ctrl, | ||
171 | 0); | ||
172 | break; | ||
173 | } | ||
174 | break; /* BOARD_EVENT_POWER_SUSPEND */ | 128 | break; /* BOARD_EVENT_POWER_SUSPEND */ |
175 | 129 | ||
176 | case BOARD_EVENT_POWER_RESUME: | 130 | case BOARD_EVENT_POWER_RESUME: |
177 | switch (board_id) { | ||
178 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
179 | smscore_gpio_set_level(coredev, | ||
180 | board->board_cfg.leds_power, 1); | ||
181 | smscore_gpio_set_level(coredev, | ||
182 | board->board_cfg.led0, 1); | ||
183 | smscore_gpio_set_level(coredev, | ||
184 | board->board_cfg.led1, 0); | ||
185 | break; | ||
186 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: | ||
187 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: | ||
188 | smscore_gpio_set_level(coredev, | ||
189 | board->board_cfg.foreign_lna0_ctrl, | ||
190 | 1); | ||
191 | break; | ||
192 | } | ||
193 | break; /* BOARD_EVENT_POWER_RESUME */ | 131 | break; /* BOARD_EVENT_POWER_RESUME */ |
194 | 132 | ||
195 | case BOARD_EVENT_BIND: | 133 | case BOARD_EVENT_BIND: |
196 | switch (board_id) { | ||
197 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
198 | smscore_gpio_set_level(coredev, | ||
199 | board->board_cfg.leds_power, 1); | ||
200 | smscore_gpio_set_level(coredev, | ||
201 | board->board_cfg.led0, 1); | ||
202 | smscore_gpio_set_level(coredev, | ||
203 | board->board_cfg.led1, 0); | ||
204 | break; | ||
205 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: | ||
206 | case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: | ||
207 | smscore_gpio_set_level(coredev, | ||
208 | board->board_cfg.foreign_lna0_ctrl, | ||
209 | 1); | ||
210 | break; | ||
211 | } | ||
212 | break; /* BOARD_EVENT_BIND */ | 134 | break; /* BOARD_EVENT_BIND */ |
213 | 135 | ||
214 | case BOARD_EVENT_SCAN_PROG: | 136 | case BOARD_EVENT_SCAN_PROG: |
@@ -218,20 +140,8 @@ int sms_board_event(struct smscore_device_t *coredev, | |||
218 | case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL: | 140 | case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL: |
219 | break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */ | 141 | break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */ |
220 | case BOARD_EVENT_FE_LOCK: | 142 | case BOARD_EVENT_FE_LOCK: |
221 | switch (board_id) { | ||
222 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
223 | smscore_gpio_set_level(coredev, | ||
224 | board->board_cfg.led1, 1); | ||
225 | break; | ||
226 | } | ||
227 | break; /* BOARD_EVENT_FE_LOCK */ | 143 | break; /* BOARD_EVENT_FE_LOCK */ |
228 | case BOARD_EVENT_FE_UNLOCK: | 144 | case BOARD_EVENT_FE_UNLOCK: |
229 | switch (board_id) { | ||
230 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
231 | smscore_gpio_set_level(coredev, | ||
232 | board->board_cfg.led1, 0); | ||
233 | break; | ||
234 | } | ||
235 | break; /* BOARD_EVENT_FE_UNLOCK */ | 145 | break; /* BOARD_EVENT_FE_UNLOCK */ |
236 | case BOARD_EVENT_DEMOD_LOCK: | 146 | case BOARD_EVENT_DEMOD_LOCK: |
237 | break; /* BOARD_EVENT_DEMOD_LOCK */ | 147 | break; /* BOARD_EVENT_DEMOD_LOCK */ |
@@ -248,20 +158,8 @@ int sms_board_event(struct smscore_device_t *coredev, | |||
248 | case BOARD_EVENT_RECEPTION_LOST_0: | 158 | case BOARD_EVENT_RECEPTION_LOST_0: |
249 | break; /* BOARD_EVENT_RECEPTION_LOST_0 */ | 159 | break; /* BOARD_EVENT_RECEPTION_LOST_0 */ |
250 | case BOARD_EVENT_MULTIPLEX_OK: | 160 | case BOARD_EVENT_MULTIPLEX_OK: |
251 | switch (board_id) { | ||
252 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
253 | smscore_gpio_set_level(coredev, | ||
254 | board->board_cfg.led1, 1); | ||
255 | break; | ||
256 | } | ||
257 | break; /* BOARD_EVENT_MULTIPLEX_OK */ | 161 | break; /* BOARD_EVENT_MULTIPLEX_OK */ |
258 | case BOARD_EVENT_MULTIPLEX_ERRORS: | 162 | case BOARD_EVENT_MULTIPLEX_ERRORS: |
259 | switch (board_id) { | ||
260 | case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: | ||
261 | smscore_gpio_set_level(coredev, | ||
262 | board->board_cfg.led1, 0); | ||
263 | break; | ||
264 | } | ||
265 | break; /* BOARD_EVENT_MULTIPLEX_ERRORS */ | 163 | break; /* BOARD_EVENT_MULTIPLEX_ERRORS */ |
266 | 164 | ||
267 | default: | 165 | default: |
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index a246903c3341..bd9ab9d0d12a 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c | |||
@@ -816,7 +816,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode) | |||
816 | 816 | ||
817 | sms_debug("set device mode to %d", mode); | 817 | sms_debug("set device mode to %d", mode); |
818 | if (coredev->device_flags & SMS_DEVICE_FAMILY2) { | 818 | if (coredev->device_flags & SMS_DEVICE_FAMILY2) { |
819 | if (mode < DEVICE_MODE_DVBT || mode > DEVICE_MODE_RAW_TUNER) { | 819 | if (mode < DEVICE_MODE_DVBT || mode >= DEVICE_MODE_RAW_TUNER) { |
820 | sms_err("invalid mode specified %d", mode); | 820 | sms_err("invalid mode specified %d", mode); |
821 | return -EINVAL; | 821 | return -EINVAL; |
822 | } | 822 | } |
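The smscoreapi.c change is a single comparison: the FAMILY2 mode check now uses >= so that DEVICE_MODE_RAW_TUNER itself falls outside the accepted range. Restated in isolation (the enum values here are placeholders, not the driver's real ones):

        #include <linux/types.h>

        /* illustration only: placeholder enum, mirroring the bound check above */
        enum device_mode {
                DEVICE_MODE_DVBT,
                DEVICE_MODE_DVBH,
                DEVICE_MODE_RAW_TUNER,
        };

        static bool mode_valid_for_family2(int mode)
        {
                /* '>=' makes DEVICE_MODE_RAW_TUNER itself invalid */
                return mode >= DEVICE_MODE_DVBT && mode < DEVICE_MODE_RAW_TUNER;
        }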
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c index 3ee1c3902c56..266033ae2784 100644 --- a/drivers/media/dvb/siano/smsdvb.c +++ b/drivers/media/dvb/siano/smsdvb.c | |||
@@ -325,6 +325,16 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client, | |||
325 | 0 : -ETIME; | 325 | 0 : -ETIME; |
326 | } | 326 | } |
327 | 327 | ||
328 | static inline int led_feedback(struct smsdvb_client_t *client) | ||
329 | { | ||
330 | if (client->fe_status & FE_HAS_LOCK) | ||
331 | return sms_board_led_feedback(client->coredev, | ||
332 | (client->sms_stat_dvb.ReceptionData.BER | ||
333 | == 0) ? SMS_LED_HI : SMS_LED_LO); | ||
334 | else | ||
335 | return sms_board_led_feedback(client->coredev, SMS_LED_OFF); | ||
336 | } | ||
337 | |||
328 | static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) | 338 | static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) |
329 | { | 339 | { |
330 | struct smsdvb_client_t *client; | 340 | struct smsdvb_client_t *client; |
@@ -332,6 +342,8 @@ static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) | |||
332 | 342 | ||
333 | *stat = client->fe_status; | 343 | *stat = client->fe_status; |
334 | 344 | ||
345 | led_feedback(client); | ||
346 | |||
335 | return 0; | 347 | return 0; |
336 | } | 348 | } |
337 | 349 | ||
@@ -342,6 +354,8 @@ static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber) | |||
342 | 354 | ||
343 | *ber = client->sms_stat_dvb.ReceptionData.BER; | 355 | *ber = client->sms_stat_dvb.ReceptionData.BER; |
344 | 356 | ||
357 | led_feedback(client); | ||
358 | |||
345 | return 0; | 359 | return 0; |
346 | } | 360 | } |
347 | 361 | ||
@@ -359,6 +373,8 @@ static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength) | |||
359 | (client->sms_stat_dvb.ReceptionData.InBandPwr | 373 | (client->sms_stat_dvb.ReceptionData.InBandPwr |
360 | + 95) * 3 / 2; | 374 | + 95) * 3 / 2; |
361 | 375 | ||
376 | led_feedback(client); | ||
377 | |||
362 | return 0; | 378 | return 0; |
363 | } | 379 | } |
364 | 380 | ||
@@ -369,6 +385,8 @@ static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr) | |||
369 | 385 | ||
370 | *snr = client->sms_stat_dvb.ReceptionData.SNR; | 386 | *snr = client->sms_stat_dvb.ReceptionData.SNR; |
371 | 387 | ||
388 | led_feedback(client); | ||
389 | |||
372 | return 0; | 390 | return 0; |
373 | } | 391 | } |
374 | 392 | ||
@@ -379,6 +397,8 @@ static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) | |||
379 | 397 | ||
380 | *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets; | 398 | *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets; |
381 | 399 | ||
400 | led_feedback(client); | ||
401 | |||
382 | return 0; | 402 | return 0; |
383 | } | 403 | } |
384 | 404 | ||
@@ -404,6 +424,8 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe, | |||
404 | u32 Data[3]; | 424 | u32 Data[3]; |
405 | } Msg; | 425 | } Msg; |
406 | 426 | ||
427 | int ret; | ||
428 | |||
407 | client->fe_status = FE_HAS_SIGNAL; | 429 | client->fe_status = FE_HAS_SIGNAL; |
408 | client->event_fe_state = -1; | 430 | client->event_fe_state = -1; |
409 | client->event_unc_state = -1; | 431 | client->event_unc_state = -1; |
@@ -426,6 +448,23 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe, | |||
426 | case BANDWIDTH_AUTO: return -EOPNOTSUPP; | 448 | case BANDWIDTH_AUTO: return -EOPNOTSUPP; |
427 | default: return -EINVAL; | 449 | default: return -EINVAL; |
428 | } | 450 | } |
451 | /* Disable LNA, if any. An error is returned if no LNA is present */ | ||
452 | ret = sms_board_lna_control(client->coredev, 0); | ||
453 | if (ret == 0) { | ||
454 | fe_status_t status; | ||
455 | |||
456 | /* tune with LNA off at first */ | ||
457 | ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), | ||
458 | &client->tune_done); | ||
459 | |||
460 | smsdvb_read_status(fe, &status); | ||
461 | |||
462 | if (status & FE_HAS_LOCK) | ||
463 | return ret; | ||
464 | |||
465 | /* previous tune didn't lock - enable LNA and tune again */ | ||
466 | sms_board_lna_control(client->coredev, 1); | ||
467 | } | ||
429 | 468 | ||
430 | return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), | 469 | return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), |
431 | &client->tune_done); | 470 | &client->tune_done); |
@@ -451,6 +490,8 @@ static int smsdvb_init(struct dvb_frontend *fe) | |||
451 | struct smsdvb_client_t *client = | 490 | struct smsdvb_client_t *client = |
452 | container_of(fe, struct smsdvb_client_t, frontend); | 491 | container_of(fe, struct smsdvb_client_t, frontend); |
453 | 492 | ||
493 | sms_board_power(client->coredev, 1); | ||
494 | |||
454 | sms_board_dvb3_event(client, DVB3_EVENT_INIT); | 495 | sms_board_dvb3_event(client, DVB3_EVENT_INIT); |
455 | return 0; | 496 | return 0; |
456 | } | 497 | } |
@@ -460,6 +501,9 @@ static int smsdvb_sleep(struct dvb_frontend *fe) | |||
460 | struct smsdvb_client_t *client = | 501 | struct smsdvb_client_t *client = |
461 | container_of(fe, struct smsdvb_client_t, frontend); | 502 | container_of(fe, struct smsdvb_client_t, frontend); |
462 | 503 | ||
504 | sms_board_led_feedback(client->coredev, SMS_LED_OFF); | ||
505 | sms_board_power(client->coredev, 0); | ||
506 | |||
463 | sms_board_dvb3_event(client, DVB3_EVENT_SLEEP); | 507 | sms_board_dvb3_event(client, DVB3_EVENT_SLEEP); |
464 | 508 | ||
465 | return 0; | 509 | return 0; |
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c index dfaa49a53f32..d1d652e7f890 100644 --- a/drivers/media/dvb/siano/smssdio.c +++ b/drivers/media/dvb/siano/smssdio.c | |||
@@ -46,6 +46,7 @@ | |||
46 | 46 | ||
47 | #define SMSSDIO_DATA 0x00 | 47 | #define SMSSDIO_DATA 0x00 |
48 | #define SMSSDIO_INT 0x04 | 48 | #define SMSSDIO_INT 0x04 |
49 | #define SMSSDIO_BLOCK_SIZE 128 | ||
49 | 50 | ||
50 | static const struct sdio_device_id smssdio_ids[] = { | 51 | static const struct sdio_device_id smssdio_ids[] = { |
51 | {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), | 52 | {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), |
@@ -85,7 +86,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size) | |||
85 | sdio_claim_host(smsdev->func); | 86 | sdio_claim_host(smsdev->func); |
86 | 87 | ||
87 | while (size >= smsdev->func->cur_blksize) { | 88 | while (size >= smsdev->func->cur_blksize) { |
88 | ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1); | 89 | ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, |
90 | buffer, smsdev->func->cur_blksize); | ||
89 | if (ret) | 91 | if (ret) |
90 | goto out; | 92 | goto out; |
91 | 93 | ||
@@ -94,8 +96,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size) | |||
94 | } | 96 | } |
95 | 97 | ||
96 | if (size) { | 98 | if (size) { |
97 | ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA, | 99 | ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, |
98 | buffer, size); | 100 | buffer, size); |
99 | } | 101 | } |
100 | 102 | ||
101 | out: | 103 | out: |
@@ -125,23 +127,23 @@ static void smssdio_interrupt(struct sdio_func *func) | |||
125 | */ | 127 | */ |
126 | isr = sdio_readb(func, SMSSDIO_INT, &ret); | 128 | isr = sdio_readb(func, SMSSDIO_INT, &ret); |
127 | if (ret) { | 129 | if (ret) { |
128 | dev_err(&smsdev->func->dev, | 130 | sms_err("Unable to read interrupt register!\n"); |
129 | "Unable to read interrupt register!\n"); | ||
130 | return; | 131 | return; |
131 | } | 132 | } |
132 | 133 | ||
133 | if (smsdev->split_cb == NULL) { | 134 | if (smsdev->split_cb == NULL) { |
134 | cb = smscore_getbuffer(smsdev->coredev); | 135 | cb = smscore_getbuffer(smsdev->coredev); |
135 | if (!cb) { | 136 | if (!cb) { |
136 | dev_err(&smsdev->func->dev, | 137 | sms_err("Unable to allocate data buffer!\n"); |
137 | "Unable to allocate data buffer!\n"); | ||
138 | return; | 138 | return; |
139 | } | 139 | } |
140 | 140 | ||
141 | ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1); | 141 | ret = sdio_memcpy_fromio(smsdev->func, |
142 | cb->p, | ||
143 | SMSSDIO_DATA, | ||
144 | SMSSDIO_BLOCK_SIZE); | ||
142 | if (ret) { | 145 | if (ret) { |
143 | dev_err(&smsdev->func->dev, | 146 | sms_err("Error %d reading initial block!\n", ret); |
144 | "Error %d reading initial block!\n", ret); | ||
145 | return; | 147 | return; |
146 | } | 148 | } |
147 | 149 | ||
@@ -152,7 +154,10 @@ static void smssdio_interrupt(struct sdio_func *func) | |||
152 | return; | 154 | return; |
153 | } | 155 | } |
154 | 156 | ||
155 | size = hdr->msgLength - smsdev->func->cur_blksize; | 157 | if (hdr->msgLength > smsdev->func->cur_blksize) |
158 | size = hdr->msgLength - smsdev->func->cur_blksize; | ||
159 | else | ||
160 | size = 0; | ||
156 | } else { | 161 | } else { |
157 | cb = smsdev->split_cb; | 162 | cb = smsdev->split_cb; |
158 | hdr = cb->p; | 163 | hdr = cb->p; |
@@ -162,23 +167,24 @@ static void smssdio_interrupt(struct sdio_func *func) | |||
162 | smsdev->split_cb = NULL; | 167 | smsdev->split_cb = NULL; |
163 | } | 168 | } |
164 | 169 | ||
165 | if (hdr->msgLength > smsdev->func->cur_blksize) { | 170 | if (size) { |
166 | void *buffer; | 171 | void *buffer; |
167 | 172 | ||
168 | size = ALIGN(size, 128); | 173 | buffer = cb->p + (hdr->msgLength - size); |
169 | buffer = cb->p + hdr->msgLength; | 174 | size = ALIGN(size, SMSSDIO_BLOCK_SIZE); |
170 | 175 | ||
171 | BUG_ON(smsdev->func->cur_blksize != 128); | 176 | BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE); |
172 | 177 | ||
173 | /* | 178 | /* |
174 | * First attempt to transfer all of it in one go... | 179 | * First attempt to transfer all of it in one go... |
175 | */ | 180 | */ |
176 | ret = sdio_read_blocks(smsdev->func, buffer, | 181 | ret = sdio_memcpy_fromio(smsdev->func, |
177 | SMSSDIO_DATA, size / 128); | 182 | buffer, |
183 | SMSSDIO_DATA, | ||
184 | size); | ||
178 | if (ret && ret != -EINVAL) { | 185 | if (ret && ret != -EINVAL) { |
179 | smscore_putbuffer(smsdev->coredev, cb); | 186 | smscore_putbuffer(smsdev->coredev, cb); |
180 | dev_err(&smsdev->func->dev, | 187 | sms_err("Error %d reading data from card!\n", ret); |
181 | "Error %d reading data from card!\n", ret); | ||
182 | return; | 188 | return; |
183 | } | 189 | } |
184 | 190 | ||
@@ -191,12 +197,12 @@ static void smssdio_interrupt(struct sdio_func *func) | |||
191 | */ | 197 | */ |
192 | if (ret == -EINVAL) { | 198 | if (ret == -EINVAL) { |
193 | while (size) { | 199 | while (size) { |
194 | ret = sdio_read_blocks(smsdev->func, | 200 | ret = sdio_memcpy_fromio(smsdev->func, |
195 | buffer, SMSSDIO_DATA, 1); | 201 | buffer, SMSSDIO_DATA, |
202 | smsdev->func->cur_blksize); | ||
196 | if (ret) { | 203 | if (ret) { |
197 | smscore_putbuffer(smsdev->coredev, cb); | 204 | smscore_putbuffer(smsdev->coredev, cb); |
198 | dev_err(&smsdev->func->dev, | 205 | sms_err("Error %d reading " |
199 | "Error %d reading " | ||
200 | "data from card!\n", ret); | 206 | "data from card!\n", ret); |
201 | return; | 207 | return; |
202 | } | 208 | } |
@@ -269,7 +275,7 @@ static int smssdio_probe(struct sdio_func *func, | |||
269 | if (ret) | 275 | if (ret) |
270 | goto release; | 276 | goto release; |
271 | 277 | ||
272 | ret = sdio_set_block_size(func, 128); | 278 | ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE); |
273 | if (ret) | 279 | if (ret) |
274 | goto disable; | 280 | goto disable; |
275 | 281 | ||
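In smssdio.c the removed sdio_write_blocks()/sdio_read_blocks() helpers took a block count, whereas their replacements sdio_memcpy_toio()/sdio_memcpy_fromio() take a byte count, and the literal 128 becomes SMSSDIO_BLOCK_SIZE. A sketch of the send path under the byte-count API (error handling kept minimal; names other than the SDIO calls are illustrative):

        #include <linux/mmc/sdio_func.h>

        #define DATA_ADDR 0x00  /* mirrors SMSSDIO_DATA in the hunks above */

        /* Send 'size' bytes: whole cur_blksize chunks first, then the remainder. */
        static int send_buffer(struct sdio_func *func, u8 *buffer, size_t size)
        {
                int ret = 0;

                sdio_claim_host(func);

                while (size >= func->cur_blksize) {
                        ret = sdio_memcpy_toio(func, DATA_ADDR, buffer,
                                               func->cur_blksize);
                        if (ret)
                                goto out;
                        buffer += func->cur_blksize;
                        size -= func->cur_blksize;
                }

                if (size)       /* sub-block tail, still a byte count with this API */
                        ret = sdio_memcpy_toio(func, DATA_ADDR, buffer, size);
        out:
                sdio_release_host(func);
                return ret;
        }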
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 84b6fc15519d..dcf9fa9264bb 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -920,6 +920,8 @@ source "drivers/media/video/pwc/Kconfig" | |||
920 | config USB_ZR364XX | 920 | config USB_ZR364XX |
921 | tristate "USB ZR364XX Camera support" | 921 | tristate "USB ZR364XX Camera support" |
922 | depends on VIDEO_V4L2 | 922 | depends on VIDEO_V4L2 |
923 | select VIDEOBUF_GEN | ||
924 | select VIDEOBUF_VMALLOC | ||
923 | ---help--- | 925 | ---help--- |
924 | Say Y here if you want to connect this type of camera to your | 926 | Say Y here if you want to connect this type of camera to your |
925 | computer's USB port. | 927 | computer's USB port. |
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c index 10dbd4a11b30..9e39bc5f7b00 100644 --- a/drivers/media/video/bw-qcam.c +++ b/drivers/media/video/bw-qcam.c | |||
@@ -992,7 +992,7 @@ static int accept_bwqcam(struct parport *port) | |||
992 | 992 | ||
993 | if (parport[0] && strncmp(parport[0], "auto", 4) != 0) { | 993 | if (parport[0] && strncmp(parport[0], "auto", 4) != 0) { |
994 | /* user gave parport parameters */ | 994 | /* user gave parport parameters */ |
995 | for(n=0; parport[n] && n<MAX_CAMS; n++){ | 995 | for (n = 0; n < MAX_CAMS && parport[n]; n++) { |
996 | char *ep; | 996 | char *ep; |
997 | unsigned long r; | 997 | unsigned long r; |
998 | r = simple_strtoul(parport[n], &ep, 0); | 998 | r = simple_strtoul(parport[n], &ep, 0); |
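The bw-qcam.c change reorders the loop conditions so that n is checked against MAX_CAMS before parport[n] is read, avoiding a read one slot past the end of the module-parameter array when every slot is populated. The general shape of the fix, with illustrative names:

        /* 'items' is a NULL-terminated parameter array of at most MAX_ITEMS entries */
        #define MAX_ITEMS 4
        static char *items[MAX_ITEMS];

        static int count_items(void)
        {
                int n;

                for (n = 0; n < MAX_ITEMS && items[n]; n++)
                        ;       /* 'n < MAX_ITEMS' short-circuits the items[n] load */
                return n;
        }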
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c index 5136df198338..93f0dae01350 100644 --- a/drivers/media/video/cx18/cx18-controls.c +++ b/drivers/media/video/cx18/cx18-controls.c | |||
@@ -20,6 +20,7 @@ | |||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA | 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
21 | * 02111-1307 USA | 21 | * 02111-1307 USA |
22 | */ | 22 | */ |
23 | #include <linux/kernel.h> | ||
23 | 24 | ||
24 | #include "cx18-driver.h" | 25 | #include "cx18-driver.h" |
25 | #include "cx18-cards.h" | 26 | #include "cx18-cards.h" |
@@ -317,7 +318,7 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c) | |||
317 | idx = p.audio_properties & 0x03; | 318 | idx = p.audio_properties & 0x03; |
318 | /* The audio clock of the digitizer must match the codec sample | 319 | /* The audio clock of the digitizer must match the codec sample |
319 | rate otherwise you get some very strange effects. */ | 320 | rate otherwise you get some very strange effects. */ |
320 | if (idx < sizeof(freqs)) | 321 | if (idx < ARRAY_SIZE(freqs)) |
321 | cx18_call_all(cx, audio, s_clock_freq, freqs[idx]); | 322 | cx18_call_all(cx, audio, s_clock_freq, freqs[idx]); |
322 | return err; | 323 | return err; |
323 | } | 324 | } |
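cx18_s_ext_ctrls() now bounds idx with ARRAY_SIZE(freqs) instead of sizeof(freqs); for an array of 32-bit sample rates the two differ by a factor of four, so the old test allowed indexing past the table. A minimal illustration (the table contents are made up):

        #include <linux/kernel.h>       /* ARRAY_SIZE() */
        #include <linux/types.h>

        static const u32 freqs[] = { 44100, 48000, 32000 };     /* example values */

        static bool idx_ok(unsigned int idx)
        {
                /* sizeof(freqs) is 12 here, ARRAY_SIZE(freqs) is 3 */
                return idx < ARRAY_SIZE(freqs);
        }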
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c index e0cf21e0b1bf..1a1048b18f70 100644 --- a/drivers/media/video/cx23885/cx23885-417.c +++ b/drivers/media/video/cx23885/cx23885-417.c | |||
@@ -1715,6 +1715,8 @@ static struct video_device cx23885_mpeg_template = { | |||
1715 | .fops = &mpeg_fops, | 1715 | .fops = &mpeg_fops, |
1716 | .ioctl_ops = &mpeg_ioctl_ops, | 1716 | .ioctl_ops = &mpeg_ioctl_ops, |
1717 | .minor = -1, | 1717 | .minor = -1, |
1718 | .tvnorms = CX23885_NORMS, | ||
1719 | .current_norm = V4L2_STD_NTSC_M, | ||
1718 | }; | 1720 | }; |
1719 | 1721 | ||
1720 | void cx23885_417_unregister(struct cx23885_dev *dev) | 1722 | void cx23885_417_unregister(struct cx23885_dev *dev) |
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index a5cc1c1fc2d6..39465301ec94 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c | |||
@@ -3003,6 +3003,14 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl) | |||
3003 | case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: | 3003 | case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: |
3004 | ctl->demod = XC3028_FE_OREN538; | 3004 | ctl->demod = XC3028_FE_OREN538; |
3005 | break; | 3005 | break; |
3006 | case CX88_BOARD_GENIATECH_X8000_MT: | ||
3007 | /* FIXME: For this board, the xc3028 never recovers after being | ||
3008 | powered down (the reset GPIO probably is not set properly). | ||
3009 | We don't have access to the hardware so we cannot determine | ||
3010 | which GPIO is used for xc3028, so just disable power xc3028 | ||
3011 | power management for now */ | ||
3012 | ctl->disable_power_mgmt = 1; | ||
3013 | break; | ||
3006 | case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: | 3014 | case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: |
3007 | case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: | 3015 | case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: |
3008 | case CX88_BOARD_PROLINK_PV_8000GT: | 3016 | case CX88_BOARD_PROLINK_PV_8000GT: |
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c index c44e87600219..e237b507659b 100644 --- a/drivers/media/video/cx88/cx88-dvb.c +++ b/drivers/media/video/cx88/cx88-dvb.c | |||
@@ -501,6 +501,7 @@ static struct zl10353_config cx88_pinnacle_hybrid_pctv = { | |||
501 | static struct zl10353_config cx88_geniatech_x8000_mt = { | 501 | static struct zl10353_config cx88_geniatech_x8000_mt = { |
502 | .demod_address = (0x1e >> 1), | 502 | .demod_address = (0x1e >> 1), |
503 | .no_tuner = 1, | 503 | .no_tuner = 1, |
504 | .disable_i2c_gate_ctrl = 1, | ||
504 | }; | 505 | }; |
505 | 506 | ||
506 | static struct s5h1411_config dvico_fusionhdtv7_config = { | 507 | static struct s5h1411_config dvico_fusionhdtv7_config = { |
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c index da4e3912cd37..7172dcf2a4fa 100644 --- a/drivers/media/video/cx88/cx88-mpeg.c +++ b/drivers/media/video/cx88/cx88-mpeg.c | |||
@@ -116,6 +116,10 @@ static int cx8802_start_dma(struct cx8802_dev *dev, | |||
116 | udelay(100); | 116 | udelay(100); |
117 | break; | 117 | break; |
118 | case CX88_BOARD_HAUPPAUGE_HVR1300: | 118 | case CX88_BOARD_HAUPPAUGE_HVR1300: |
119 | /* Enable MPEG parallel IO and video signal pins */ | ||
120 | cx_write(MO_PINMUX_IO, 0x88); | ||
121 | cx_write(TS_SOP_STAT, 0); | ||
122 | cx_write(TS_VALERR_CNTRL, 0); | ||
119 | break; | 123 | break; |
120 | case CX88_BOARD_PINNACLE_PCTV_HD_800i: | 124 | case CX88_BOARD_PINNACLE_PCTV_HD_800i: |
121 | /* Enable MPEG parallel IO and video signal pins */ | 125 | /* Enable MPEG parallel IO and video signal pins */ |
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 320f1f60276e..1c2e544eda73 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c | |||
@@ -218,7 +218,7 @@ static struct em28xx_reg_seq silvercrest_reg_seq[] = { | |||
218 | struct em28xx_board em28xx_boards[] = { | 218 | struct em28xx_board em28xx_boards[] = { |
219 | [EM2750_BOARD_UNKNOWN] = { | 219 | [EM2750_BOARD_UNKNOWN] = { |
220 | .name = "EM2710/EM2750/EM2751 webcam grabber", | 220 | .name = "EM2710/EM2750/EM2751 webcam grabber", |
221 | .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, | 221 | .xclk = EM28XX_XCLK_FREQUENCY_20MHZ, |
222 | .tuner_type = TUNER_ABSENT, | 222 | .tuner_type = TUNER_ABSENT, |
223 | .is_webcam = 1, | 223 | .is_webcam = 1, |
224 | .input = { { | 224 | .input = { { |
@@ -622,22 +622,27 @@ struct em28xx_board em28xx_boards[] = { | |||
622 | }, | 622 | }, |
623 | [EM2861_BOARD_PLEXTOR_PX_TV100U] = { | 623 | [EM2861_BOARD_PLEXTOR_PX_TV100U] = { |
624 | .name = "Plextor ConvertX PX-TV100U", | 624 | .name = "Plextor ConvertX PX-TV100U", |
625 | .valid = EM28XX_BOARD_NOT_VALIDATED, | ||
626 | .tuner_type = TUNER_TNF_5335MF, | 625 | .tuner_type = TUNER_TNF_5335MF, |
626 | .xclk = EM28XX_XCLK_I2S_MSB_TIMING | | ||
627 | EM28XX_XCLK_FREQUENCY_12MHZ, | ||
627 | .tda9887_conf = TDA9887_PRESENT, | 628 | .tda9887_conf = TDA9887_PRESENT, |
628 | .decoder = EM28XX_TVP5150, | 629 | .decoder = EM28XX_TVP5150, |
630 | .has_msp34xx = 1, | ||
629 | .input = { { | 631 | .input = { { |
630 | .type = EM28XX_VMUX_TELEVISION, | 632 | .type = EM28XX_VMUX_TELEVISION, |
631 | .vmux = TVP5150_COMPOSITE0, | 633 | .vmux = TVP5150_COMPOSITE0, |
632 | .amux = EM28XX_AMUX_LINE_IN, | 634 | .amux = EM28XX_AMUX_LINE_IN, |
635 | .gpio = pinnacle_hybrid_pro_analog, | ||
633 | }, { | 636 | }, { |
634 | .type = EM28XX_VMUX_COMPOSITE1, | 637 | .type = EM28XX_VMUX_COMPOSITE1, |
635 | .vmux = TVP5150_COMPOSITE1, | 638 | .vmux = TVP5150_COMPOSITE1, |
636 | .amux = EM28XX_AMUX_LINE_IN, | 639 | .amux = EM28XX_AMUX_LINE_IN, |
640 | .gpio = pinnacle_hybrid_pro_analog, | ||
637 | }, { | 641 | }, { |
638 | .type = EM28XX_VMUX_SVIDEO, | 642 | .type = EM28XX_VMUX_SVIDEO, |
639 | .vmux = TVP5150_SVIDEO, | 643 | .vmux = TVP5150_SVIDEO, |
640 | .amux = EM28XX_AMUX_LINE_IN, | 644 | .amux = EM28XX_AMUX_LINE_IN, |
645 | .gpio = pinnacle_hybrid_pro_analog, | ||
641 | } }, | 646 | } }, |
642 | }, | 647 | }, |
643 | 648 | ||
@@ -1544,6 +1549,8 @@ struct usb_device_id em28xx_id_table[] = { | |||
1544 | .driver_info = EM2750_BOARD_UNKNOWN }, | 1549 | .driver_info = EM2750_BOARD_UNKNOWN }, |
1545 | { USB_DEVICE(0xeb1a, 0x2800), | 1550 | { USB_DEVICE(0xeb1a, 0x2800), |
1546 | .driver_info = EM2800_BOARD_UNKNOWN }, | 1551 | .driver_info = EM2800_BOARD_UNKNOWN }, |
1552 | { USB_DEVICE(0xeb1a, 0x2710), | ||
1553 | .driver_info = EM2820_BOARD_UNKNOWN }, | ||
1547 | { USB_DEVICE(0xeb1a, 0x2820), | 1554 | { USB_DEVICE(0xeb1a, 0x2820), |
1548 | .driver_info = EM2820_BOARD_UNKNOWN }, | 1555 | .driver_info = EM2820_BOARD_UNKNOWN }, |
1549 | { USB_DEVICE(0xeb1a, 0x2821), | 1556 | { USB_DEVICE(0xeb1a, 0x2821), |
@@ -1723,6 +1730,25 @@ static inline void em28xx_set_model(struct em28xx *dev) | |||
1723 | EM28XX_I2C_FREQ_100_KHZ; | 1730 | EM28XX_I2C_FREQ_100_KHZ; |
1724 | } | 1731 | } |
1725 | 1732 | ||
1733 | |||
1734 | /* FIXME: Should be replaced by a proper mt9m111 driver */ | ||
1735 | static int em28xx_initialize_mt9m111(struct em28xx *dev) | ||
1736 | { | ||
1737 | int i; | ||
1738 | unsigned char regs[][3] = { | ||
1739 | { 0x0d, 0x00, 0x01, }, /* reset and use defaults */ | ||
1740 | { 0x0d, 0x00, 0x00, }, | ||
1741 | { 0x0a, 0x00, 0x21, }, | ||
1742 | { 0x21, 0x04, 0x00, }, /* full readout speed, no row/col skipping */ | ||
1743 | }; | ||
1744 | |||
1745 | for (i = 0; i < ARRAY_SIZE(regs); i++) | ||
1746 | i2c_master_send(&dev->i2c_client, ®s[i][0], 3); | ||
1747 | |||
1748 | return 0; | ||
1749 | } | ||
1750 | |||
1751 | |||
1726 | /* FIXME: Should be replaced by a proper mt9m001 driver */ | 1752 | /* FIXME: Should be replaced by a proper mt9m001 driver */ |
1727 | static int em28xx_initialize_mt9m001(struct em28xx *dev) | 1753 | static int em28xx_initialize_mt9m001(struct em28xx *dev) |
1728 | { | 1754 | { |
@@ -1751,7 +1777,7 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev) | |||
1751 | 1777 | ||
1752 | /* HINT method: webcam I2C chips | 1778 | /* HINT method: webcam I2C chips |
1753 | * | 1779 | * |
1754 | * This method work for webcams with Micron sensors | 1780 | * This method works for webcams with Micron sensors |
1755 | */ | 1781 | */ |
1756 | static int em28xx_hint_sensor(struct em28xx *dev) | 1782 | static int em28xx_hint_sensor(struct em28xx *dev) |
1757 | { | 1783 | { |
@@ -1761,6 +1787,7 @@ static int em28xx_hint_sensor(struct em28xx *dev) | |||
1761 | __be16 version_be; | 1787 | __be16 version_be; |
1762 | u16 version; | 1788 | u16 version; |
1763 | 1789 | ||
1790 | /* Micron sensor detection */ | ||
1764 | dev->i2c_client.addr = 0xba >> 1; | 1791 | dev->i2c_client.addr = 0xba >> 1; |
1765 | cmd = 0; | 1792 | cmd = 0; |
1766 | i2c_master_send(&dev->i2c_client, &cmd, 1); | 1793 | i2c_master_send(&dev->i2c_client, &cmd, 1); |
@@ -1769,23 +1796,54 @@ static int em28xx_hint_sensor(struct em28xx *dev) | |||
1769 | return -EINVAL; | 1796 | return -EINVAL; |
1770 | 1797 | ||
1771 | version = be16_to_cpu(version_be); | 1798 | version = be16_to_cpu(version_be); |
1772 | |||
1773 | switch (version) { | 1799 | switch (version) { |
1774 | case 0x8243: /* mt9v011 640x480 1.3 Mpix sensor */ | 1800 | case 0x8232: /* mt9v011 640x480 1.3 Mpix sensor */ |
1801 | case 0x8243: /* mt9v011 rev B 640x480 1.3 Mpix sensor */ | ||
1775 | dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; | 1802 | dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; |
1803 | em28xx_set_model(dev); | ||
1804 | |||
1776 | sensor_name = "mt9v011"; | 1805 | sensor_name = "mt9v011"; |
1777 | dev->em28xx_sensor = EM28XX_MT9V011; | 1806 | dev->em28xx_sensor = EM28XX_MT9V011; |
1778 | dev->sensor_xres = 640; | 1807 | dev->sensor_xres = 640; |
1779 | dev->sensor_yres = 480; | 1808 | dev->sensor_yres = 480; |
1780 | dev->sensor_xtal = 6300000; | 1809 | /* |
1810 | * FIXME: mt9v011 uses I2S speed as xtal clk - at least with | ||
1811 | * the Silvercrest cam I have here for testing - for higher | ||
1812 | * resolutions, a high clock causes horizontal artifacts, so we | ||
1813 | * need to use a lower xclk frequency. | ||
1814 | * Yet, it would be possible to adjust xclk depending on the | ||
1815 | * desired resolution, since this affects directly the | ||
1816 | * frame rate. | ||
1817 | */ | ||
1818 | dev->board.xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ; | ||
1819 | dev->sensor_xtal = 4300000; | ||
1781 | 1820 | ||
1782 | /* probably means GRGB 16 bit bayer */ | 1821 | /* probably means GRGB 16 bit bayer */ |
1783 | dev->vinmode = 0x0d; | 1822 | dev->vinmode = 0x0d; |
1784 | dev->vinctl = 0x00; | 1823 | dev->vinctl = 0x00; |
1785 | 1824 | ||
1786 | break; | 1825 | break; |
1826 | |||
1827 | case 0x143a: /* MT9M111 as found in the ECS G200 */ | ||
1828 | dev->model = EM2750_BOARD_UNKNOWN; | ||
1829 | em28xx_set_model(dev); | ||
1830 | |||
1831 | sensor_name = "mt9m111"; | ||
1832 | dev->board.xclk = EM28XX_XCLK_FREQUENCY_48MHZ; | ||
1833 | dev->em28xx_sensor = EM28XX_MT9M111; | ||
1834 | em28xx_initialize_mt9m111(dev); | ||
1835 | dev->sensor_xres = 640; | ||
1836 | dev->sensor_yres = 512; | ||
1837 | |||
1838 | dev->vinmode = 0x0a; | ||
1839 | dev->vinctl = 0x00; | ||
1840 | |||
1841 | break; | ||
1842 | |||
1787 | case 0x8431: | 1843 | case 0x8431: |
1788 | dev->model = EM2750_BOARD_UNKNOWN; | 1844 | dev->model = EM2750_BOARD_UNKNOWN; |
1845 | em28xx_set_model(dev); | ||
1846 | |||
1789 | sensor_name = "mt9m001"; | 1847 | sensor_name = "mt9m001"; |
1790 | dev->em28xx_sensor = EM28XX_MT9M001; | 1848 | dev->em28xx_sensor = EM28XX_MT9M001; |
1791 | em28xx_initialize_mt9m001(dev); | 1849 | em28xx_initialize_mt9m001(dev); |
@@ -1798,10 +1856,13 @@ static int em28xx_hint_sensor(struct em28xx *dev) | |||
1798 | 1856 | ||
1799 | break; | 1857 | break; |
1800 | default: | 1858 | default: |
1801 | printk("Unknown Micron Sensor 0x%04x\n", be16_to_cpu(version)); | 1859 | printk("Unknown Micron Sensor 0x%04x\n", version); |
1802 | return -EINVAL; | 1860 | return -EINVAL; |
1803 | } | 1861 | } |
1804 | 1862 | ||
1863 | /* Setup webcam defaults */ | ||
1864 | em28xx_pre_card_setup(dev); | ||
1865 | |||
1805 | em28xx_errdev("Sensor is %s, using model %s entry.\n", | 1866 | em28xx_errdev("Sensor is %s, using model %s entry.\n", |
1806 | sensor_name, em28xx_boards[dev->model].name); | 1867 | sensor_name, em28xx_boards[dev->model].name); |
1807 | 1868 | ||
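The sensor-hint hunk switches on a 16-bit version word read back from the Micron sensor over I2C (the send/receive pair is visible in the context lines above it). A stripped-down sketch of that probe; the register index and byte order are taken from the context lines and should be treated as assumptions for anything other than the parts named in the hunk:

        #include <linux/i2c.h>
        #include <linux/kernel.h>

        static int read_sensor_version(struct i2c_client *client, u16 *version)
        {
                u8 cmd = 0;     /* chip version register, per the context above */
                u8 buf[2];
                int ret;

                ret = i2c_master_send(client, &cmd, 1);
                if (ret < 0)
                        return ret;

                ret = i2c_master_recv(client, buf, 2);
                if (ret < 0)
                        return ret;

                *version = (buf[0] << 8) | buf[1];  /* big-endian on the wire,
                                                       e.g. 0x8232/0x8243 -> mt9v011 */
                return 0;
        }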
@@ -1813,60 +1874,6 @@ static int em28xx_hint_sensor(struct em28xx *dev) | |||
1813 | */ | 1874 | */ |
1814 | void em28xx_pre_card_setup(struct em28xx *dev) | 1875 | void em28xx_pre_card_setup(struct em28xx *dev) |
1815 | { | 1876 | { |
1816 | int rc; | ||
1817 | |||
1818 | em28xx_set_model(dev); | ||
1819 | |||
1820 | em28xx_info("Identified as %s (card=%d)\n", | ||
1821 | dev->board.name, dev->model); | ||
1822 | |||
1823 | /* Set the default GPO/GPIO for legacy devices */ | ||
1824 | dev->reg_gpo_num = EM2880_R04_GPO; | ||
1825 | dev->reg_gpio_num = EM28XX_R08_GPIO; | ||
1826 | |||
1827 | dev->wait_after_write = 5; | ||
1828 | |||
1829 | /* Based on the Chip ID, set the device configuration */ | ||
1830 | rc = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); | ||
1831 | if (rc > 0) { | ||
1832 | dev->chip_id = rc; | ||
1833 | |||
1834 | switch (dev->chip_id) { | ||
1835 | case CHIP_ID_EM2750: | ||
1836 | em28xx_info("chip ID is em2750\n"); | ||
1837 | break; | ||
1838 | case CHIP_ID_EM2820: | ||
1839 | em28xx_info("chip ID is em2710 or em2820\n"); | ||
1840 | break; | ||
1841 | case CHIP_ID_EM2840: | ||
1842 | em28xx_info("chip ID is em2840\n"); | ||
1843 | break; | ||
1844 | case CHIP_ID_EM2860: | ||
1845 | em28xx_info("chip ID is em2860\n"); | ||
1846 | break; | ||
1847 | case CHIP_ID_EM2870: | ||
1848 | em28xx_info("chip ID is em2870\n"); | ||
1849 | dev->wait_after_write = 0; | ||
1850 | break; | ||
1851 | case CHIP_ID_EM2874: | ||
1852 | em28xx_info("chip ID is em2874\n"); | ||
1853 | dev->reg_gpio_num = EM2874_R80_GPIO; | ||
1854 | dev->wait_after_write = 0; | ||
1855 | break; | ||
1856 | case CHIP_ID_EM2883: | ||
1857 | em28xx_info("chip ID is em2882/em2883\n"); | ||
1858 | dev->wait_after_write = 0; | ||
1859 | break; | ||
1860 | default: | ||
1861 | em28xx_info("em28xx chip ID = %d\n", dev->chip_id); | ||
1862 | } | ||
1863 | } | ||
1864 | |||
1865 | /* Prepopulate cached GPO register content */ | ||
1866 | rc = em28xx_read_reg(dev, dev->reg_gpo_num); | ||
1867 | if (rc >= 0) | ||
1868 | dev->reg_gpo = rc; | ||
1869 | |||
1870 | /* Set the initial XCLK and I2C clock values based on the board | 1877 | /* Set the initial XCLK and I2C clock values based on the board |
1871 | definition */ | 1878 | definition */ |
1872 | em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk & 0x7f); | 1879 | em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk & 0x7f); |
@@ -1876,9 +1883,8 @@ void em28xx_pre_card_setup(struct em28xx *dev) | |||
1876 | /* request some modules */ | 1883 | /* request some modules */ |
1877 | switch (dev->model) { | 1884 | switch (dev->model) { |
1878 | case EM2861_BOARD_PLEXTOR_PX_TV100U: | 1885 | case EM2861_BOARD_PLEXTOR_PX_TV100U: |
1879 | /* FIXME guess */ | 1886 | /* Sets the msp34xx I2S speed */ |
1880 | /* Turn on analog audio output */ | 1887 | dev->i2s_speed = 2048000; |
1881 | em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd); | ||
1882 | break; | 1888 | break; |
1883 | case EM2861_BOARD_KWORLD_PVRTV_300U: | 1889 | case EM2861_BOARD_KWORLD_PVRTV_300U: |
1884 | case EM2880_BOARD_KWORLD_DVB_305U: | 1890 | case EM2880_BOARD_KWORLD_DVB_305U: |
@@ -2216,7 +2222,20 @@ void em28xx_register_i2c_ir(struct em28xx *dev) | |||
2216 | 2222 | ||
2217 | void em28xx_card_setup(struct em28xx *dev) | 2223 | void em28xx_card_setup(struct em28xx *dev) |
2218 | { | 2224 | { |
2219 | em28xx_set_model(dev); | 2225 | /* |
2226 | * If the device can be a webcam, seek for a sensor. | ||
2227 | * If sensor is not found, then it isn't a webcam. | ||
2228 | */ | ||
2229 | if (dev->board.is_webcam) { | ||
2230 | if (em28xx_hint_sensor(dev) < 0) | ||
2231 | dev->board.is_webcam = 0; | ||
2232 | else | ||
2233 | dev->progressive = 1; | ||
2234 | } else | ||
2235 | em28xx_set_model(dev); | ||
2236 | |||
2237 | em28xx_info("Identified as %s (card=%d)\n", | ||
2238 | dev->board.name, dev->model); | ||
2220 | 2239 | ||
2221 | dev->tuner_type = em28xx_boards[dev->model].tuner_type; | 2240 | dev->tuner_type = em28xx_boards[dev->model].tuner_type; |
2222 | if (em28xx_boards[dev->model].tuner_addr) | 2241 | if (em28xx_boards[dev->model].tuner_addr) |
@@ -2290,10 +2309,6 @@ void em28xx_card_setup(struct em28xx *dev) | |||
2290 | em28xx_gpio_set(dev, dev->board.tuner_gpio); | 2309 | em28xx_gpio_set(dev, dev->board.tuner_gpio); |
2291 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); | 2310 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); |
2292 | break; | 2311 | break; |
2293 | case EM2820_BOARD_SILVERCREST_WEBCAM: | ||
2294 | /* FIXME: need to document the registers bellow */ | ||
2295 | em28xx_write_reg(dev, 0x0d, 0x42); | ||
2296 | em28xx_write_reg(dev, 0x13, 0x08); | ||
2297 | } | 2312 | } |
2298 | 2313 | ||
2299 | if (dev->board.has_snapshot_button) | 2314 | if (dev->board.has_snapshot_button) |
@@ -2367,7 +2382,9 @@ void em28xx_card_setup(struct em28xx *dev) | |||
2367 | } | 2382 | } |
2368 | 2383 | ||
2369 | em28xx_tuner_setup(dev); | 2384 | em28xx_tuner_setup(dev); |
2370 | em28xx_ir_init(dev); | 2385 | |
2386 | if(!disable_ir) | ||
2387 | em28xx_ir_init(dev); | ||
2371 | } | 2388 | } |
2372 | 2389 | ||
2373 | 2390 | ||
@@ -2433,7 +2450,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, | |||
2433 | int minor) | 2450 | int minor) |
2434 | { | 2451 | { |
2435 | struct em28xx *dev = *devhandle; | 2452 | struct em28xx *dev = *devhandle; |
2436 | int retval = -ENOMEM; | 2453 | int retval; |
2437 | int errCode; | 2454 | int errCode; |
2438 | 2455 | ||
2439 | dev->udev = udev; | 2456 | dev->udev = udev; |
@@ -2450,6 +2467,58 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, | |||
2450 | dev->em28xx_read_reg_req = em28xx_read_reg_req; | 2467 | dev->em28xx_read_reg_req = em28xx_read_reg_req; |
2451 | dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800; | 2468 | dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800; |
2452 | 2469 | ||
2470 | em28xx_set_model(dev); | ||
2471 | |||
2472 | /* Set the default GPO/GPIO for legacy devices */ | ||
2473 | dev->reg_gpo_num = EM2880_R04_GPO; | ||
2474 | dev->reg_gpio_num = EM28XX_R08_GPIO; | ||
2475 | |||
2476 | dev->wait_after_write = 5; | ||
2477 | |||
2478 | /* Based on the Chip ID, set the device configuration */ | ||
2479 | retval = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); | ||
2480 | if (retval > 0) { | ||
2481 | dev->chip_id = retval; | ||
2482 | |||
2483 | switch (dev->chip_id) { | ||
2484 | case CHIP_ID_EM2710: | ||
2485 | em28xx_info("chip ID is em2710\n"); | ||
2486 | break; | ||
2487 | case CHIP_ID_EM2750: | ||
2488 | em28xx_info("chip ID is em2750\n"); | ||
2489 | break; | ||
2490 | case CHIP_ID_EM2820: | ||
2491 | em28xx_info("chip ID is em2820 (or em2710)\n"); | ||
2492 | break; | ||
2493 | case CHIP_ID_EM2840: | ||
2494 | em28xx_info("chip ID is em2840\n"); | ||
2495 | break; | ||
2496 | case CHIP_ID_EM2860: | ||
2497 | em28xx_info("chip ID is em2860\n"); | ||
2498 | break; | ||
2499 | case CHIP_ID_EM2870: | ||
2500 | em28xx_info("chip ID is em2870\n"); | ||
2501 | dev->wait_after_write = 0; | ||
2502 | break; | ||
2503 | case CHIP_ID_EM2874: | ||
2504 | em28xx_info("chip ID is em2874\n"); | ||
2505 | dev->reg_gpio_num = EM2874_R80_GPIO; | ||
2506 | dev->wait_after_write = 0; | ||
2507 | break; | ||
2508 | case CHIP_ID_EM2883: | ||
2509 | em28xx_info("chip ID is em2882/em2883\n"); | ||
2510 | dev->wait_after_write = 0; | ||
2511 | break; | ||
2512 | default: | ||
2513 | em28xx_info("em28xx chip ID = %d\n", dev->chip_id); | ||
2514 | } | ||
2515 | } | ||
2516 | |||
2517 | /* Prepopulate cached GPO register content */ | ||
2518 | retval = em28xx_read_reg(dev, dev->reg_gpo_num); | ||
2519 | if (retval >= 0) | ||
2520 | dev->reg_gpo = retval; | ||
2521 | |||
2453 | em28xx_pre_card_setup(dev); | 2522 | em28xx_pre_card_setup(dev); |
2454 | 2523 | ||
2455 | if (!dev->board.is_em2800) { | 2524 | if (!dev->board.is_em2800) { |
@@ -2484,14 +2553,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, | |||
2484 | dev->vinmode = 0x10; | 2553 | dev->vinmode = 0x10; |
2485 | dev->vinctl = 0x11; | 2554 | dev->vinctl = 0x11; |
2486 | 2555 | ||
2487 | /* | ||
2488 | * If the device can be a webcam, seek for a sensor. | ||
2489 | * If sensor is not found, then it isn't a webcam. | ||
2490 | */ | ||
2491 | if (dev->board.is_webcam) | ||
2492 | if (em28xx_hint_sensor(dev) < 0) | ||
2493 | dev->board.is_webcam = 0; | ||
2494 | |||
2495 | /* Do board specific init and eeprom reading */ | 2556 | /* Do board specific init and eeprom reading */ |
2496 | em28xx_card_setup(dev); | 2557 | em28xx_card_setup(dev); |
2497 | 2558 | ||
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c index 5b78e199abd1..98e140b5d95e 100644 --- a/drivers/media/video/em28xx/em28xx-core.c +++ b/drivers/media/video/em28xx/em28xx-core.c | |||
@@ -632,6 +632,9 @@ int em28xx_capture_start(struct em28xx *dev, int start) | |||
632 | return rc; | 632 | return rc; |
633 | } | 633 | } |
634 | 634 | ||
635 | if (dev->board.is_webcam) | ||
636 | rc = em28xx_write_reg(dev, 0x13, 0x0c); | ||
637 | |||
635 | /* enable video capture */ | 638 | /* enable video capture */ |
636 | rc = em28xx_write_reg(dev, 0x48, 0x00); | 639 | rc = em28xx_write_reg(dev, 0x48, 0x00); |
637 | 640 | ||
@@ -720,7 +723,10 @@ int em28xx_resolution_set(struct em28xx *dev) | |||
720 | { | 723 | { |
721 | int width, height; | 724 | int width, height; |
722 | width = norm_maxw(dev); | 725 | width = norm_maxw(dev); |
723 | height = norm_maxh(dev) >> 1; | 726 | height = norm_maxh(dev); |
727 | |||
728 | if (!dev->progressive) | ||
729 | height >>= norm_maxh(dev); | ||
724 | 730 | ||
725 | em28xx_set_outfmt(dev); | 731 | em28xx_set_outfmt(dev); |
726 | 732 | ||
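One added line in the em28xx_resolution_set() hunk reads oddly: height >>= norm_maxh(dev) shifts by the full frame height rather than halving it, which collapses height to zero for interlaced devices; the surrounding change (full height when dev->progressive is set, half otherwise) suggests a shift by one was intended. A sketch of that presumed intent — an assumption about the patch, not what the hunk actually applies:

        #include <linux/types.h>

        /* Presumed intent only: the hunk above shifts by norm_maxh(dev) instead of 1. */
        static int capture_height(int max_height, bool progressive)
        {
                int height = max_height;

                if (!progressive)
                        height >>= 1;   /* interlaced: each field holds half the lines */

                return height;
        }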
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index cf0ac7f2a30d..d603575431b4 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c | |||
@@ -478,7 +478,6 @@ static int dvb_init(struct em28xx *dev) | |||
478 | } | 478 | } |
479 | break; | 479 | break; |
480 | case EM2880_BOARD_KWORLD_DVB_310U: | 480 | case EM2880_BOARD_KWORLD_DVB_310U: |
481 | case EM2880_BOARD_EMPIRE_DUAL_TV: | ||
482 | dvb->frontend = dvb_attach(zl10353_attach, | 481 | dvb->frontend = dvb_attach(zl10353_attach, |
483 | &em28xx_zl10353_with_xc3028, | 482 | &em28xx_zl10353_with_xc3028, |
484 | &dev->i2c_adap); | 483 | &dev->i2c_adap); |
@@ -488,6 +487,7 @@ static int dvb_init(struct em28xx *dev) | |||
488 | } | 487 | } |
489 | break; | 488 | break; |
490 | case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: | 489 | case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: |
490 | case EM2880_BOARD_EMPIRE_DUAL_TV: | ||
491 | dvb->frontend = dvb_attach(zl10353_attach, | 491 | dvb->frontend = dvb_attach(zl10353_attach, |
492 | &em28xx_zl10353_xc3028_no_i2c_gate, | 492 | &em28xx_zl10353_xc3028_no_i2c_gate, |
493 | &dev->i2c_adap); | 493 | &dev->i2c_adap); |
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h index a2676d63cfd0..6bf84bd787df 100644 --- a/drivers/media/video/em28xx/em28xx-reg.h +++ b/drivers/media/video/em28xx/em28xx-reg.h | |||
@@ -176,7 +176,8 @@ | |||
176 | 176 | ||
177 | /* FIXME: Need to be populated with the other chip ID's */ | 177 | /* FIXME: Need to be populated with the other chip ID's */ |
178 | enum em28xx_chip_id { | 178 | enum em28xx_chip_id { |
179 | CHIP_ID_EM2820 = 18, /* Also used by em2710 */ | 179 | CHIP_ID_EM2710 = 17, |
180 | CHIP_ID_EM2820 = 18, /* Also used by some em2710 */ | ||
180 | CHIP_ID_EM2840 = 20, | 181 | CHIP_ID_EM2840 = 20, |
181 | CHIP_ID_EM2750 = 33, | 182 | CHIP_ID_EM2750 = 33, |
182 | CHIP_ID_EM2860 = 34, | 183 | CHIP_ID_EM2860 = 34, |
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index ff37b4c15f44..ab079d9256c4 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c | |||
@@ -194,15 +194,24 @@ static void em28xx_copy_video(struct em28xx *dev, | |||
194 | startread = p; | 194 | startread = p; |
195 | remain = len; | 195 | remain = len; |
196 | 196 | ||
197 | /* Interlaces frame */ | 197 | if (dev->progressive) |
198 | if (buf->top_field) | ||
199 | fieldstart = outp; | 198 | fieldstart = outp; |
200 | else | 199 | else { |
201 | fieldstart = outp + bytesperline; | 200 | /* Interlaces two half frames */ |
201 | if (buf->top_field) | ||
202 | fieldstart = outp; | ||
203 | else | ||
204 | fieldstart = outp + bytesperline; | ||
205 | } | ||
202 | 206 | ||
203 | linesdone = dma_q->pos / bytesperline; | 207 | linesdone = dma_q->pos / bytesperline; |
204 | currlinedone = dma_q->pos % bytesperline; | 208 | currlinedone = dma_q->pos % bytesperline; |
205 | offset = linesdone * bytesperline * 2 + currlinedone; | 209 | |
210 | if (dev->progressive) | ||
211 | offset = linesdone * bytesperline + currlinedone; | ||
212 | else | ||
213 | offset = linesdone * bytesperline * 2 + currlinedone; | ||
214 | |||
206 | startwrite = fieldstart + offset; | 215 | startwrite = fieldstart + offset; |
207 | lencopy = bytesperline - currlinedone; | 216 | lencopy = bytesperline - currlinedone; |
208 | lencopy = lencopy > remain ? remain : lencopy; | 217 | lencopy = lencopy > remain ? remain : lencopy; |
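A worked example of the destination offset computed above, with illustrative numbers only (720-pixel YUYV lines, bytesperline = 1440):

    /* progressive, 10 lines done: offset = 10 * 1440     = 14400        */
    /* interlaced,  10 lines done: offset = 10 * 1440 * 2 = 28800,       */
    /* because each field only fills every other output line; the bottom */
    /* field additionally starts one line in: fieldstart = outp + 1440.  */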
@@ -376,7 +385,7 @@ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb) | |||
376 | em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2], | 385 | em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2], |
377 | len, (p[2] & 1) ? "odd" : "even"); | 386 | len, (p[2] & 1) ? "odd" : "even"); |
378 | 387 | ||
379 | if (!(p[2] & 1)) { | 388 | if (dev->progressive || !(p[2] & 1)) { |
380 | if (buf != NULL) | 389 | if (buf != NULL) |
381 | buffer_filled(dev, dma_q, buf); | 390 | buffer_filled(dev, dma_q, buf); |
382 | get_next_buf(dma_q, &buf); | 391 | get_next_buf(dma_q, &buf); |
@@ -689,7 +698,10 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, | |||
689 | f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; | 698 | f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; |
690 | 699 | ||
691 | /* FIXME: TOP? NONE? BOTTOM? ALTENATE? */ | 700 | /* FIXME: TOP? NONE? BOTTOM? ALTENATE? */ |
692 | f->fmt.pix.field = dev->interlaced ? | 701 | if (dev->progressive) |
702 | f->fmt.pix.field = V4L2_FIELD_NONE; | ||
703 | else | ||
704 | f->fmt.pix.field = dev->interlaced ? | ||
693 | V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; | 705 | V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; |
694 | 706 | ||
695 | mutex_unlock(&dev->lock); | 707 | mutex_unlock(&dev->lock); |
@@ -753,7 +765,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
753 | f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3; | 765 | f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3; |
754 | f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height; | 766 | f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height; |
755 | f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; | 767 | f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; |
756 | f->fmt.pix.field = V4L2_FIELD_INTERLACED; | 768 | if (dev->progressive) |
769 | f->fmt.pix.field = V4L2_FIELD_NONE; | ||
770 | else | ||
771 | f->fmt.pix.field = dev->interlaced ? | ||
772 | V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; | ||
757 | 773 | ||
758 | return 0; | 774 | return 0; |
759 | } | 775 | } |
@@ -846,6 +862,41 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm) | |||
846 | return 0; | 862 | return 0; |
847 | } | 863 | } |
848 | 864 | ||
865 | static int vidioc_g_parm(struct file *file, void *priv, | ||
866 | struct v4l2_streamparm *p) | ||
867 | { | ||
868 | struct em28xx_fh *fh = priv; | ||
869 | struct em28xx *dev = fh->dev; | ||
870 | int rc = 0; | ||
871 | |||
872 | if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | ||
873 | return -EINVAL; | ||
874 | |||
875 | if (dev->board.is_webcam) | ||
876 | rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0, | ||
877 | video, g_parm, p); | ||
878 | else | ||
879 | v4l2_video_std_frame_period(dev->norm, | ||
880 | &p->parm.capture.timeperframe); | ||
881 | |||
882 | return rc; | ||
883 | } | ||
884 | |||
885 | static int vidioc_s_parm(struct file *file, void *priv, | ||
886 | struct v4l2_streamparm *p) | ||
887 | { | ||
888 | struct em28xx_fh *fh = priv; | ||
889 | struct em28xx *dev = fh->dev; | ||
890 | |||
891 | if (!dev->board.is_webcam) | ||
892 | return -EINVAL; | ||
893 | |||
894 | if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | ||
895 | return -EINVAL; | ||
896 | |||
897 | return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p); | ||
898 | } | ||
899 | |||
849 | static const char *iname[] = { | 900 | static const char *iname[] = { |
850 | [EM28XX_VMUX_COMPOSITE1] = "Composite1", | 901 | [EM28XX_VMUX_COMPOSITE1] = "Composite1", |
851 | [EM28XX_VMUX_COMPOSITE2] = "Composite2", | 902 | [EM28XX_VMUX_COMPOSITE2] = "Composite2", |
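The new g_parm/s_parm handlers let a V4L2 application negotiate the webcam frame rate. A minimal userspace sketch; the device node and the 15 fps request are assumptions, not part of the patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_streamparm parm = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
            int fd = open("/dev/video0", O_RDWR);   /* device node assumed */

            if (fd < 0)
                    return 1;
            parm.parm.capture.timeperframe.numerator = 1;
            parm.parm.capture.timeperframe.denominator = 15;  /* ask for ~15 fps */
            if (ioctl(fd, VIDIOC_S_PARM, &parm) == 0)
                    printf("granted %u/%u s per frame\n",
                           parm.parm.capture.timeperframe.numerator,
                           parm.parm.capture.timeperframe.denominator);
            return 0;
    }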
@@ -1624,6 +1675,7 @@ static int em28xx_v4l2_open(struct file *filp) | |||
1624 | struct em28xx *dev; | 1675 | struct em28xx *dev; |
1625 | enum v4l2_buf_type fh_type; | 1676 | enum v4l2_buf_type fh_type; |
1626 | struct em28xx_fh *fh; | 1677 | struct em28xx_fh *fh; |
1678 | enum v4l2_field field; | ||
1627 | 1679 | ||
1628 | dev = em28xx_get_device(minor, &fh_type, &radio); | 1680 | dev = em28xx_get_device(minor, &fh_type, &radio); |
1629 | 1681 | ||
@@ -1665,8 +1717,13 @@ static int em28xx_v4l2_open(struct file *filp) | |||
1665 | 1717 | ||
1666 | dev->users++; | 1718 | dev->users++; |
1667 | 1719 | ||
1720 | if (dev->progressive) | ||
1721 | field = V4L2_FIELD_NONE; | ||
1722 | else | ||
1723 | field = V4L2_FIELD_INTERLACED; | ||
1724 | |||
1668 | videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops, | 1725 | videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops, |
1669 | NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED, | 1726 | NULL, &dev->slock, fh->type, field, |
1670 | sizeof(struct em28xx_buffer), fh); | 1727 | sizeof(struct em28xx_buffer), fh); |
1671 | 1728 | ||
1672 | mutex_unlock(&dev->lock); | 1729 | mutex_unlock(&dev->lock); |
@@ -1885,6 +1942,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { | |||
1885 | .vidioc_qbuf = vidioc_qbuf, | 1942 | .vidioc_qbuf = vidioc_qbuf, |
1886 | .vidioc_dqbuf = vidioc_dqbuf, | 1943 | .vidioc_dqbuf = vidioc_dqbuf, |
1887 | .vidioc_s_std = vidioc_s_std, | 1944 | .vidioc_s_std = vidioc_s_std, |
1945 | .vidioc_g_parm = vidioc_g_parm, | ||
1946 | .vidioc_s_parm = vidioc_s_parm, | ||
1888 | .vidioc_enum_input = vidioc_enum_input, | 1947 | .vidioc_enum_input = vidioc_enum_input, |
1889 | .vidioc_g_input = vidioc_g_input, | 1948 | .vidioc_g_input = vidioc_g_input, |
1890 | .vidioc_s_input = vidioc_s_input, | 1949 | .vidioc_s_input = vidioc_s_input, |
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h index 45bd513f62dc..a2add61f7d59 100644 --- a/drivers/media/video/em28xx/em28xx.h +++ b/drivers/media/video/em28xx/em28xx.h | |||
@@ -367,6 +367,7 @@ enum em28xx_sensor { | |||
367 | EM28XX_NOSENSOR = 0, | 367 | EM28XX_NOSENSOR = 0, |
368 | EM28XX_MT9V011, | 368 | EM28XX_MT9V011, |
369 | EM28XX_MT9M001, | 369 | EM28XX_MT9M001, |
370 | EM28XX_MT9M111, | ||
370 | }; | 371 | }; |
371 | 372 | ||
372 | enum em28xx_adecoder { | 373 | enum em28xx_adecoder { |
@@ -484,6 +485,9 @@ struct em28xx { | |||
484 | int sensor_xres, sensor_yres; | 485 | int sensor_xres, sensor_yres; |
485 | int sensor_xtal; | 486 | int sensor_xtal; |
486 | 487 | ||
488 | /* Allows progressive (e. g. non-interlaced) mode */ | ||
489 | int progressive; | ||
490 | |||
487 | /* Vinmode/Vinctl used at the driver */ | 491 | /* Vinmode/Vinctl used at the driver */ |
488 | int vinmode, vinctl; | 492 | int vinmode, vinctl; |
489 | 493 | ||
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig index 34f46f2bc040..e994dcac43ff 100644 --- a/drivers/media/video/gspca/Kconfig +++ b/drivers/media/video/gspca/Kconfig | |||
@@ -114,7 +114,7 @@ config USB_GSPCA_SN9C20X | |||
114 | 114 | ||
115 | config USB_GSPCA_SN9C20X_EVDEV | 115 | config USB_GSPCA_SN9C20X_EVDEV |
116 | bool "Enable evdev support" | 116 | bool "Enable evdev support" |
117 | depends on USB_GSPCA_SN9C20X | 117 | depends on USB_GSPCA_SN9C20X && INPUT |
118 | ---help--- | 118 | ---help--- |
119 | Say Y here in order to enable evdev support for sn9c20x webcam button. | 119 | Say Y here in order to enable evdev support for sn9c20x webcam button. |
120 | 120 | ||
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index ccd47f57f42c..d678765cbba2 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c | |||
@@ -1220,6 +1220,8 @@ static const struct video_device hdpvr_video_template = { | |||
1220 | V4L2_STD_PAL_G | V4L2_STD_PAL_H | V4L2_STD_PAL_I | | 1220 | V4L2_STD_PAL_G | V4L2_STD_PAL_H | V4L2_STD_PAL_I | |
1221 | V4L2_STD_PAL_D | V4L2_STD_PAL_M | V4L2_STD_PAL_N | | 1221 | V4L2_STD_PAL_D | V4L2_STD_PAL_M | V4L2_STD_PAL_N | |
1222 | V4L2_STD_PAL_60, | 1222 | V4L2_STD_PAL_60, |
1223 | .current_norm = V4L2_STD_NTSC | V4L2_STD_PAL_M | | ||
1224 | V4L2_STD_PAL_60, | ||
1223 | }; | 1225 | }; |
1224 | 1226 | ||
1225 | int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, | 1227 | int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, |
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c index a3b77ed3f089..4a9c8ce0ecb3 100644 --- a/drivers/media/video/ivtv/ivtv-controls.c +++ b/drivers/media/video/ivtv/ivtv-controls.c | |||
@@ -17,6 +17,7 @@ | |||
17 | along with this program; if not, write to the Free Software | 17 | along with this program; if not, write to the Free Software |
18 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #include <linux/kernel.h> | ||
20 | 21 | ||
21 | #include "ivtv-driver.h" | 22 | #include "ivtv-driver.h" |
22 | #include "ivtv-cards.h" | 23 | #include "ivtv-cards.h" |
@@ -281,7 +282,7 @@ int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c) | |||
281 | idx = p.audio_properties & 0x03; | 282 | idx = p.audio_properties & 0x03; |
282 | /* The audio clock of the digitizer must match the codec sample | 283 | /* The audio clock of the digitizer must match the codec sample |
283 | rate otherwise you get some very strange effects. */ | 284 | rate otherwise you get some very strange effects. */ |
284 | if (idx < sizeof(freqs)) | 285 | if (idx < ARRAY_SIZE(freqs)) |
285 | ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]); | 286 | ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]); |
286 | return err; | 287 | return err; |
287 | } | 288 | } |
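The one-character fix above closes an out-of-bounds read: idx comes from "audio_properties & 0x03" and can reach 3, while sizeof() counts bytes rather than elements, which is also why the hunk adds #include <linux/kernel.h> for ARRAY_SIZE(). A short illustration (array contents shown here are illustrative):

    static const u32 freqs[] = { 44100, 48000, 32000 };
    /* sizeof(freqs)     == 12 -- bytes, so "idx < sizeof(freqs)" passes for idx = 3 */
    /* ARRAY_SIZE(freqs) == 3  -- elements, so idx = 3 is correctly rejected         */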
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c index b2260de645f0..cc85f77a5706 100644 --- a/drivers/media/video/mt9v011.c +++ b/drivers/media/video/mt9v011.c | |||
@@ -52,13 +52,34 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = { | |||
52 | .step = 1, | 52 | .step = 1, |
53 | .default_value = 0, | 53 | .default_value = 0, |
54 | .flags = 0, | 54 | .flags = 0, |
55 | }, | 55 | }, { |
56 | .id = V4L2_CID_HFLIP, | ||
57 | .type = V4L2_CTRL_TYPE_BOOLEAN, | ||
58 | .name = "Mirror", | ||
59 | .minimum = 0, | ||
60 | .maximum = 1, | ||
61 | .step = 1, | ||
62 | .default_value = 0, | ||
63 | .flags = 0, | ||
64 | }, { | ||
65 | .id = V4L2_CID_VFLIP, | ||
66 | .type = V4L2_CTRL_TYPE_BOOLEAN, | ||
67 | .name = "Vflip", | ||
68 | .minimum = 0, | ||
69 | .maximum = 1, | ||
70 | .step = 1, | ||
71 | .default_value = 0, | ||
72 | .flags = 0, | ||
73 | }, { | ||
74 | } | ||
56 | }; | 75 | }; |
57 | 76 | ||
58 | struct mt9v011 { | 77 | struct mt9v011 { |
59 | struct v4l2_subdev sd; | 78 | struct v4l2_subdev sd; |
60 | unsigned width, height; | 79 | unsigned width, height; |
61 | unsigned xtal; | 80 | unsigned xtal; |
81 | unsigned hflip:1; | ||
82 | unsigned vflip:1; | ||
62 | 83 | ||
63 | u16 global_gain, red_bal, blue_bal; | 84 | u16 global_gain, red_bal, blue_bal; |
64 | }; | 85 | }; |
@@ -131,7 +152,6 @@ static const struct i2c_reg_value mt9v011_init_default[] = { | |||
131 | 152 | ||
132 | { R0A_MT9V011_CLK_SPEED, 0x0000 }, | 153 | { R0A_MT9V011_CLK_SPEED, 0x0000 }, |
133 | { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 }, | 154 | { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 }, |
134 | { R20_MT9V011_READ_MODE, 0x1000 }, | ||
135 | 155 | ||
136 | { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */ | 156 | { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */ |
137 | }; | 157 | }; |
@@ -156,7 +176,7 @@ static void set_balance(struct v4l2_subdev *sd) | |||
156 | mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); | 176 | mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); |
157 | } | 177 | } |
158 | 178 | ||
159 | static void calc_fps(struct v4l2_subdev *sd) | 179 | static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator) |
160 | { | 180 | { |
161 | struct mt9v011 *core = to_mt9v011(sd); | 181 | struct mt9v011 *core = to_mt9v011(sd); |
162 | unsigned height, width, hblank, vblank, speed; | 182 | unsigned height, width, hblank, vblank, speed; |
@@ -179,6 +199,51 @@ static void calc_fps(struct v4l2_subdev *sd) | |||
179 | 199 | ||
180 | v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clcks)\n", | 200 | v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clcks)\n", |
181 | tmp / 1000, tmp % 1000, t_time); | 201 | tmp / 1000, tmp % 1000, t_time); |
202 | |||
203 | if (numerator && denominator) { | ||
204 | *numerator = 1000; | ||
205 | *denominator = (u32)frames_per_ms; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | static u16 calc_speed(struct v4l2_subdev *sd, u32 numerator, u32 denominator) | ||
210 | { | ||
211 | struct mt9v011 *core = to_mt9v011(sd); | ||
212 | unsigned height, width, hblank, vblank; | ||
213 | unsigned row_time, line_time; | ||
214 | u64 t_time, speed; | ||
215 | |||
216 | /* Avoid bogus calculus */ | ||
217 | if (!numerator || !denominator) | ||
218 | return 0; | ||
219 | |||
220 | height = mt9v011_read(sd, R03_MT9V011_HEIGHT); | ||
221 | width = mt9v011_read(sd, R04_MT9V011_WIDTH); | ||
222 | hblank = mt9v011_read(sd, R05_MT9V011_HBLANK); | ||
223 | vblank = mt9v011_read(sd, R06_MT9V011_VBLANK); | ||
224 | |||
225 | row_time = width + 113 + hblank; | ||
226 | line_time = height + vblank + 1; | ||
227 | |||
228 | t_time = core->xtal * ((u64)numerator); | ||
229 | /* round to the closest value */ | ||
230 | t_time += denominator / 2; | ||
231 | do_div(t_time, denominator); | ||
232 | |||
233 | speed = t_time; | ||
234 | do_div(speed, row_time * line_time); | ||
235 | |||
236 | /* Avoid having a negative value for speed */ | ||
237 | if (speed < 2) | ||
238 | speed = 0; | ||
239 | else | ||
240 | speed -= 2; | ||
241 | |||
242 | /* Avoid speed overflow */ | ||
243 | if (speed > 15) | ||
244 | return 15; | ||
245 | |||
246 | return (u16)speed; | ||
182 | } | 247 | } |
183 | 248 | ||
184 | static void set_res(struct v4l2_subdev *sd) | 249 | static void set_res(struct v4l2_subdev *sd) |
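calc_speed() above relies on the kernel do_div() helper because a plain 64-by-32 division would not link on 32-bit architectures (the compiler would emit a call to a libgcc routine the kernel does not provide). A minimal sketch of the convention, using the same rounding as the hunk:

    u64 t_time = (u64)core->xtal * numerator;

    t_time += denominator / 2;      /* round to the closest value */
    do_div(t_time, denominator);    /* divides in place; the return value is the remainder */
    /* t_time now holds xtal * numerator / denominator */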
@@ -207,9 +272,23 @@ static void set_res(struct v4l2_subdev *sd) | |||
207 | mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height); | 272 | mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height); |
208 | mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height); | 273 | mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height); |
209 | 274 | ||
210 | calc_fps(sd); | 275 | calc_fps(sd, NULL, NULL); |
211 | }; | 276 | }; |
212 | 277 | ||
278 | static void set_read_mode(struct v4l2_subdev *sd) | ||
279 | { | ||
280 | struct mt9v011 *core = to_mt9v011(sd); | ||
281 | unsigned mode = 0x1000; | ||
282 | |||
283 | if (core->hflip) | ||
284 | mode |= 0x4000; | ||
285 | |||
286 | if (core->vflip) | ||
287 | mode |= 0x8000; | ||
288 | |||
289 | mt9v011_write(sd, R20_MT9V011_READ_MODE, mode); | ||
290 | } | ||
291 | |||
213 | static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) | 292 | static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) |
214 | { | 293 | { |
215 | int i; | 294 | int i; |
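For reference, the values that set_read_mode() above ends up writing to R20_MT9V011_READ_MODE, from simple bit arithmetic on the masks used in the hunk:

    /* base (no flip) : 0x1000                              */
    /* hflip only     : 0x1000 | 0x4000          == 0x5000  */
    /* vflip only     : 0x1000 | 0x8000          == 0x9000  */
    /* hflip + vflip  : 0x1000 | 0x4000 | 0x8000 == 0xd000  */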
@@ -220,6 +299,7 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) | |||
220 | 299 | ||
221 | set_balance(sd); | 300 | set_balance(sd); |
222 | set_res(sd); | 301 | set_res(sd); |
302 | set_read_mode(sd); | ||
223 | 303 | ||
224 | return 0; | 304 | return 0; |
225 | }; | 305 | }; |
@@ -240,6 +320,12 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) | |||
240 | case V4L2_CID_BLUE_BALANCE: | 320 | case V4L2_CID_BLUE_BALANCE: |
241 | ctrl->value = core->blue_bal; | 321 | ctrl->value = core->blue_bal; |
242 | return 0; | 322 | return 0; |
323 | case V4L2_CID_HFLIP: | ||
324 | ctrl->value = core->hflip ? 1 : 0; | ||
325 | return 0; | ||
326 | case V4L2_CID_VFLIP: | ||
327 | ctrl->value = core->vflip ? 1 : 0; | ||
328 | return 0; | ||
243 | } | 329 | } |
244 | return -EINVAL; | 330 | return -EINVAL; |
245 | } | 331 | } |
@@ -288,6 +374,14 @@ static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) | |||
288 | case V4L2_CID_BLUE_BALANCE: | 374 | case V4L2_CID_BLUE_BALANCE: |
289 | core->blue_bal = ctrl->value; | 375 | core->blue_bal = ctrl->value; |
290 | break; | 376 | break; |
377 | case V4L2_CID_HFLIP: | ||
378 | core->hflip = ctrl->value; | ||
379 | set_read_mode(sd); | ||
380 | return 0; | ||
381 | case V4L2_CID_VFLIP: | ||
382 | core->vflip = ctrl->value; | ||
383 | set_read_mode(sd); | ||
384 | return 0; | ||
291 | default: | 385 | default: |
292 | return -EINVAL; | 386 | return -EINVAL; |
293 | } | 387 | } |
@@ -322,6 +416,44 @@ static int mt9v011_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) | |||
322 | return 0; | 416 | return 0; |
323 | } | 417 | } |
324 | 418 | ||
419 | static int mt9v011_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) | ||
420 | { | ||
421 | struct v4l2_captureparm *cp = &parms->parm.capture; | ||
422 | |||
423 | if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | ||
424 | return -EINVAL; | ||
425 | |||
426 | memset(cp, 0, sizeof(struct v4l2_captureparm)); | ||
427 | cp->capability = V4L2_CAP_TIMEPERFRAME; | ||
428 | calc_fps(sd, | ||
429 | &cp->timeperframe.numerator, | ||
430 | &cp->timeperframe.denominator); | ||
431 | |||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static int mt9v011_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) | ||
436 | { | ||
437 | struct v4l2_captureparm *cp = &parms->parm.capture; | ||
438 | struct v4l2_fract *tpf = &cp->timeperframe; | ||
439 | u16 speed; | ||
440 | |||
441 | if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | ||
442 | return -EINVAL; | ||
443 | if (cp->extendedmode != 0) | ||
444 | return -EINVAL; | ||
445 | |||
446 | speed = calc_speed(sd, tpf->numerator, tpf->denominator); | ||
447 | |||
448 | mt9v011_write(sd, R0A_MT9V011_CLK_SPEED, speed); | ||
449 | v4l2_dbg(1, debug, sd, "Setting speed to %d\n", speed); | ||
450 | |||
451 | /* Recalculate and update fps info */ | ||
452 | calc_fps(sd, &tpf->numerator, &tpf->denominator); | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
325 | static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) | 457 | static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) |
326 | { | 458 | { |
327 | struct v4l2_pix_format *pix = &fmt->fmt.pix; | 459 | struct v4l2_pix_format *pix = &fmt->fmt.pix; |
@@ -393,10 +525,13 @@ static int mt9v011_s_register(struct v4l2_subdev *sd, | |||
393 | static int mt9v011_g_chip_ident(struct v4l2_subdev *sd, | 525 | static int mt9v011_g_chip_ident(struct v4l2_subdev *sd, |
394 | struct v4l2_dbg_chip_ident *chip) | 526 | struct v4l2_dbg_chip_ident *chip) |
395 | { | 527 | { |
528 | u16 version; | ||
396 | struct i2c_client *client = v4l2_get_subdevdata(sd); | 529 | struct i2c_client *client = v4l2_get_subdevdata(sd); |
397 | 530 | ||
531 | version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION); | ||
532 | |||
398 | return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011, | 533 | return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011, |
399 | MT9V011_VERSION); | 534 | version); |
400 | } | 535 | } |
401 | 536 | ||
402 | static const struct v4l2_subdev_core_ops mt9v011_core_ops = { | 537 | static const struct v4l2_subdev_core_ops mt9v011_core_ops = { |
@@ -416,6 +551,8 @@ static const struct v4l2_subdev_video_ops mt9v011_video_ops = { | |||
416 | .enum_fmt = mt9v011_enum_fmt, | 551 | .enum_fmt = mt9v011_enum_fmt, |
417 | .try_fmt = mt9v011_try_fmt, | 552 | .try_fmt = mt9v011_try_fmt, |
418 | .s_fmt = mt9v011_s_fmt, | 553 | .s_fmt = mt9v011_s_fmt, |
554 | .g_parm = mt9v011_g_parm, | ||
555 | .s_parm = mt9v011_s_parm, | ||
419 | }; | 556 | }; |
420 | 557 | ||
421 | static const struct v4l2_subdev_ops mt9v011_ops = { | 558 | static const struct v4l2_subdev_ops mt9v011_ops = { |
@@ -449,8 +586,9 @@ static int mt9v011_probe(struct i2c_client *c, | |||
449 | 586 | ||
450 | /* Check if the sensor is really a MT9V011 */ | 587 | /* Check if the sensor is really a MT9V011 */ |
451 | version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION); | 588 | version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION); |
452 | if (version != MT9V011_VERSION) { | 589 | if ((version != MT9V011_VERSION) && |
453 | v4l2_info(sd, "*** unknown micron chip detected (0x%04x.\n", | 590 | (version != MT9V011_REV_B_VERSION)) { |
591 | v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n", | ||
454 | version); | 592 | version); |
455 | kfree(core); | 593 | kfree(core); |
456 | return -EINVAL; | 594 | return -EINVAL; |
@@ -461,8 +599,8 @@ static int mt9v011_probe(struct i2c_client *c, | |||
461 | core->height = 480; | 599 | core->height = 480; |
462 | core->xtal = 27000000; /* Hz */ | 600 | core->xtal = 27000000; /* Hz */ |
463 | 601 | ||
464 | v4l_info(c, "chip found @ 0x%02x (%s)\n", | 602 | v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n", |
465 | c->addr << 1, c->adapter->name); | 603 | c->addr << 1, c->adapter->name, version); |
466 | 604 | ||
467 | return 0; | 605 | return 0; |
468 | } | 606 | } |
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h index 9e443ee30558..3350fd6083c3 100644 --- a/drivers/media/video/mt9v011.h +++ b/drivers/media/video/mt9v011.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #define R35_MT9V011_GLOBAL_GAIN 0x35 | 30 | #define R35_MT9V011_GLOBAL_GAIN 0x35 |
31 | #define RF1_MT9V011_CHIP_ENABLE 0xf1 | 31 | #define RF1_MT9V011_CHIP_ENABLE 0xf1 |
32 | 32 | ||
33 | #define MT9V011_VERSION 0x8243 | 33 | #define MT9V011_VERSION 0x8232 |
34 | #define MT9V011_REV_B_VERSION 0x8243 | ||
34 | 35 | ||
35 | #endif | 36 | #endif |
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 2d075205bdfe..736c31d23194 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c | |||
@@ -234,6 +234,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev) | |||
234 | return ret; | 234 | return ret; |
235 | } | 235 | } |
236 | 236 | ||
237 | /* Called under spinlock_irqsave(&pcdev->lock, ...) */ | ||
237 | static void mx1_videobuf_queue(struct videobuf_queue *vq, | 238 | static void mx1_videobuf_queue(struct videobuf_queue *vq, |
238 | struct videobuf_buffer *vb) | 239 | struct videobuf_buffer *vb) |
239 | { | 240 | { |
@@ -241,13 +242,10 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq, | |||
241 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | 242 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); |
242 | struct mx1_camera_dev *pcdev = ici->priv; | 243 | struct mx1_camera_dev *pcdev = ici->priv; |
243 | struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); | 244 | struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); |
244 | unsigned long flags; | ||
245 | 245 | ||
246 | dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, | 246 | dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, |
247 | vb, vb->baddr, vb->bsize); | 247 | vb, vb->baddr, vb->bsize); |
248 | 248 | ||
249 | spin_lock_irqsave(&pcdev->lock, flags); | ||
250 | |||
251 | list_add_tail(&vb->queue, &pcdev->capture); | 249 | list_add_tail(&vb->queue, &pcdev->capture); |
252 | 250 | ||
253 | vb->state = VIDEOBUF_ACTIVE; | 251 | vb->state = VIDEOBUF_ACTIVE; |
@@ -264,8 +262,6 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq, | |||
264 | __raw_writel(temp, pcdev->base + CSICR1); | 262 | __raw_writel(temp, pcdev->base + CSICR1); |
265 | } | 263 | } |
266 | } | 264 | } |
267 | |||
268 | spin_unlock_irqrestore(&pcdev->lock, flags); | ||
269 | } | 265 | } |
270 | 266 | ||
271 | static void mx1_videobuf_release(struct videobuf_queue *vq, | 267 | static void mx1_videobuf_release(struct videobuf_queue *vq, |
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index e605c076ed89..9770cb7932ca 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c | |||
@@ -332,7 +332,10 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc) | |||
332 | } | 332 | } |
333 | } | 333 | } |
334 | 334 | ||
335 | /* Called with .vb_lock held */ | 335 | /* |
336 | * Called with .vb_lock mutex held and | ||
337 | * under spinlock_irqsave(&mx3_cam->lock, ...) | ||
338 | */ | ||
336 | static void mx3_videobuf_queue(struct videobuf_queue *vq, | 339 | static void mx3_videobuf_queue(struct videobuf_queue *vq, |
337 | struct videobuf_buffer *vb) | 340 | struct videobuf_buffer *vb) |
338 | { | 341 | { |
@@ -346,7 +349,8 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, | |||
346 | struct idmac_video_param *video = &ichan->params.video; | 349 | struct idmac_video_param *video = &ichan->params.video; |
347 | const struct soc_camera_data_format *data_fmt = icd->current_fmt; | 350 | const struct soc_camera_data_format *data_fmt = icd->current_fmt; |
348 | dma_cookie_t cookie; | 351 | dma_cookie_t cookie; |
349 | unsigned long flags; | 352 | |
353 | BUG_ON(!irqs_disabled()); | ||
350 | 354 | ||
351 | /* This is the configuration of one sg-element */ | 355 | /* This is the configuration of one sg-element */ |
352 | video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); | 356 | video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); |
@@ -359,8 +363,6 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, | |||
359 | memset((void *)vb->baddr, 0xaa, vb->bsize); | 363 | memset((void *)vb->baddr, 0xaa, vb->bsize); |
360 | #endif | 364 | #endif |
361 | 365 | ||
362 | spin_lock_irqsave(&mx3_cam->lock, flags); | ||
363 | |||
364 | list_add_tail(&vb->queue, &mx3_cam->capture); | 366 | list_add_tail(&vb->queue, &mx3_cam->capture); |
365 | 367 | ||
366 | if (!mx3_cam->active) { | 368 | if (!mx3_cam->active) { |
@@ -370,24 +372,23 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, | |||
370 | vb->state = VIDEOBUF_QUEUED; | 372 | vb->state = VIDEOBUF_QUEUED; |
371 | } | 373 | } |
372 | 374 | ||
373 | spin_unlock_irqrestore(&mx3_cam->lock, flags); | 375 | spin_unlock_irq(&mx3_cam->lock); |
374 | 376 | ||
375 | cookie = txd->tx_submit(txd); | 377 | cookie = txd->tx_submit(txd); |
376 | dev_dbg(&icd->dev, "Submitted cookie %d DMA 0x%08x\n", cookie, sg_dma_address(&buf->sg)); | 378 | dev_dbg(&icd->dev, "Submitted cookie %d DMA 0x%08x\n", cookie, sg_dma_address(&buf->sg)); |
379 | |||
380 | spin_lock_irq(&mx3_cam->lock); | ||
381 | |||
377 | if (cookie >= 0) | 382 | if (cookie >= 0) |
378 | return; | 383 | return; |
379 | 384 | ||
380 | /* Submit error */ | 385 | /* Submit error */ |
381 | vb->state = VIDEOBUF_PREPARED; | 386 | vb->state = VIDEOBUF_PREPARED; |
382 | 387 | ||
383 | spin_lock_irqsave(&mx3_cam->lock, flags); | ||
384 | |||
385 | list_del_init(&vb->queue); | 388 | list_del_init(&vb->queue); |
386 | 389 | ||
387 | if (mx3_cam->active == buf) | 390 | if (mx3_cam->active == buf) |
388 | mx3_cam->active = NULL; | 391 | mx3_cam->active = NULL; |
389 | |||
390 | spin_unlock_irqrestore(&mx3_cam->lock, flags); | ||
391 | } | 392 | } |
392 | 393 | ||
393 | /* Called with .vb_lock held */ | 394 | /* Called with .vb_lock held */ |
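The mx1 and mx3 hunks above (and the pxa/sh_mobile ones below) all depend on the same videobuf calling convention: the core now invokes the driver's .buf_queue() callback with the queue spinlock already held and interrupts disabled, so taking the lock again inside the callback would deadlock. A simplified sketch of the caller, assuming the videobuf-core.c behaviour of this period; this is also why mx3_videobuf_queue() briefly drops the lock with spin_unlock_irq() around txd->tx_submit():

    /* inside videobuf_qbuf(), simplified */
    spin_lock_irqsave(q->irqlock, flags);
    q->ops->buf_queue(q, buf);              /* e.g. mx3_videobuf_queue() */
    spin_unlock_irqrestore(q->irqlock, flags);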
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 46e0d8ad880f..016bb45ba0c3 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c | |||
@@ -612,6 +612,7 @@ static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev) | |||
612 | dev_dbg(pcdev->soc_host.dev, "%s\n", __func__); | 612 | dev_dbg(pcdev->soc_host.dev, "%s\n", __func__); |
613 | } | 613 | } |
614 | 614 | ||
615 | /* Called under spinlock_irqsave(&pcdev->lock, ...) */ | ||
615 | static void pxa_videobuf_queue(struct videobuf_queue *vq, | 616 | static void pxa_videobuf_queue(struct videobuf_queue *vq, |
616 | struct videobuf_buffer *vb) | 617 | struct videobuf_buffer *vb) |
617 | { | 618 | { |
@@ -619,13 +620,10 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq, | |||
619 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | 620 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); |
620 | struct pxa_camera_dev *pcdev = ici->priv; | 621 | struct pxa_camera_dev *pcdev = ici->priv; |
621 | struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); | 622 | struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); |
622 | unsigned long flags; | ||
623 | 623 | ||
624 | dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d active=%p\n", __func__, | 624 | dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d active=%p\n", __func__, |
625 | vb, vb->baddr, vb->bsize, pcdev->active); | 625 | vb, vb->baddr, vb->bsize, pcdev->active); |
626 | 626 | ||
627 | spin_lock_irqsave(&pcdev->lock, flags); | ||
628 | |||
629 | list_add_tail(&vb->queue, &pcdev->capture); | 627 | list_add_tail(&vb->queue, &pcdev->capture); |
630 | 628 | ||
631 | vb->state = VIDEOBUF_ACTIVE; | 629 | vb->state = VIDEOBUF_ACTIVE; |
@@ -633,8 +631,6 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq, | |||
633 | 631 | ||
634 | if (!pcdev->active) | 632 | if (!pcdev->active) |
635 | pxa_camera_start_capture(pcdev); | 633 | pxa_camera_start_capture(pcdev); |
636 | |||
637 | spin_unlock_irqrestore(&pcdev->lock, flags); | ||
638 | } | 634 | } |
639 | 635 | ||
640 | static void pxa_videobuf_release(struct videobuf_queue *vq, | 636 | static void pxa_videobuf_release(struct videobuf_queue *vq, |
@@ -1579,6 +1575,7 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) | |||
1579 | pcdev->mclk = 20000000; | 1575 | pcdev->mclk = 20000000; |
1580 | } | 1576 | } |
1581 | 1577 | ||
1578 | pcdev->soc_host.dev = &pdev->dev; | ||
1582 | pcdev->mclk_divisor = mclk_get_divisor(pcdev); | 1579 | pcdev->mclk_divisor = mclk_get_divisor(pcdev); |
1583 | 1580 | ||
1584 | INIT_LIST_HEAD(&pcdev->capture); | 1581 | INIT_LIST_HEAD(&pcdev->capture); |
@@ -1644,7 +1641,6 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) | |||
1644 | pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; | 1641 | pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; |
1645 | pcdev->soc_host.ops = &pxa_soc_camera_host_ops; | 1642 | pcdev->soc_host.ops = &pxa_soc_camera_host_ops; |
1646 | pcdev->soc_host.priv = pcdev; | 1643 | pcdev->soc_host.priv = pcdev; |
1647 | pcdev->soc_host.dev = &pdev->dev; | ||
1648 | pcdev->soc_host.nr = pdev->id; | 1644 | pcdev->soc_host.nr = pdev->id; |
1649 | 1645 | ||
1650 | err = soc_camera_host_register(&pcdev->soc_host); | 1646 | err = soc_camera_host_register(&pcdev->soc_host); |
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 06861b782b95..6eebe3ef97d3 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c | |||
@@ -3331,8 +3331,8 @@ struct saa7134_board saa7134_boards[] = { | |||
3331 | .gpio = 0x0200100, | 3331 | .gpio = 0x0200100, |
3332 | }, | 3332 | }, |
3333 | }, | 3333 | }, |
3334 | [SAA7134_BOARD_HAUPPAUGE_HVR1120] = { | 3334 | [SAA7134_BOARD_HAUPPAUGE_HVR1150] = { |
3335 | .name = "Hauppauge WinTV-HVR1120 ATSC/QAM-Hybrid", | 3335 | .name = "Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid", |
3336 | .audio_clock = 0x00187de7, | 3336 | .audio_clock = 0x00187de7, |
3337 | .tuner_type = TUNER_PHILIPS_TDA8290, | 3337 | .tuner_type = TUNER_PHILIPS_TDA8290, |
3338 | .radio_type = UNSET, | 3338 | .radio_type = UNSET, |
@@ -3363,8 +3363,8 @@ struct saa7134_board saa7134_boards[] = { | |||
3363 | .gpio = 0x0800100, /* GPIO 23 HI for FM */ | 3363 | .gpio = 0x0800100, /* GPIO 23 HI for FM */ |
3364 | }, | 3364 | }, |
3365 | }, | 3365 | }, |
3366 | [SAA7134_BOARD_HAUPPAUGE_HVR1110R3] = { | 3366 | [SAA7134_BOARD_HAUPPAUGE_HVR1120] = { |
3367 | .name = "Hauppauge WinTV-HVR1110r3 DVB-T/Hybrid", | 3367 | .name = "Hauppauge WinTV-HVR1120 DVB-T/Hybrid", |
3368 | .audio_clock = 0x00187de7, | 3368 | .audio_clock = 0x00187de7, |
3369 | .tuner_type = TUNER_PHILIPS_TDA8290, | 3369 | .tuner_type = TUNER_PHILIPS_TDA8290, |
3370 | .radio_type = UNSET, | 3370 | .radio_type = UNSET, |
@@ -5862,31 +5862,31 @@ struct pci_device_id saa7134_pci_tbl[] = { | |||
5862 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | 5862 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, |
5863 | .subvendor = 0x0070, | 5863 | .subvendor = 0x0070, |
5864 | .subdevice = 0x6706, | 5864 | .subdevice = 0x6706, |
5865 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, | 5865 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150, |
5866 | },{ | 5866 | },{ |
5867 | .vendor = PCI_VENDOR_ID_PHILIPS, | 5867 | .vendor = PCI_VENDOR_ID_PHILIPS, |
5868 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | 5868 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, |
5869 | .subvendor = 0x0070, | 5869 | .subvendor = 0x0070, |
5870 | .subdevice = 0x6707, | 5870 | .subdevice = 0x6707, |
5871 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3, | 5871 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, |
5872 | },{ | 5872 | },{ |
5873 | .vendor = PCI_VENDOR_ID_PHILIPS, | 5873 | .vendor = PCI_VENDOR_ID_PHILIPS, |
5874 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | 5874 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, |
5875 | .subvendor = 0x0070, | 5875 | .subvendor = 0x0070, |
5876 | .subdevice = 0x6708, | 5876 | .subdevice = 0x6708, |
5877 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, | 5877 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150, |
5878 | },{ | 5878 | },{ |
5879 | .vendor = PCI_VENDOR_ID_PHILIPS, | 5879 | .vendor = PCI_VENDOR_ID_PHILIPS, |
5880 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | 5880 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, |
5881 | .subvendor = 0x0070, | 5881 | .subvendor = 0x0070, |
5882 | .subdevice = 0x6709, | 5882 | .subdevice = 0x6709, |
5883 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3, | 5883 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, |
5884 | },{ | 5884 | },{ |
5885 | .vendor = PCI_VENDOR_ID_PHILIPS, | 5885 | .vendor = PCI_VENDOR_ID_PHILIPS, |
5886 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | 5886 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, |
5887 | .subvendor = 0x0070, | 5887 | .subvendor = 0x0070, |
5888 | .subdevice = 0x670a, | 5888 | .subdevice = 0x670a, |
5889 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3, | 5889 | .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, |
5890 | },{ | 5890 | },{ |
5891 | .vendor = PCI_VENDOR_ID_PHILIPS, | 5891 | .vendor = PCI_VENDOR_ID_PHILIPS, |
5892 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, | 5892 | .device = PCI_DEVICE_ID_PHILIPS_SAA7133, |
@@ -6363,8 +6363,8 @@ static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev, | |||
6363 | switch (command) { | 6363 | switch (command) { |
6364 | case TDA18271_CALLBACK_CMD_AGC_ENABLE: /* 0 */ | 6364 | case TDA18271_CALLBACK_CMD_AGC_ENABLE: /* 0 */ |
6365 | switch (dev->board) { | 6365 | switch (dev->board) { |
6366 | case SAA7134_BOARD_HAUPPAUGE_HVR1150: | ||
6366 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: | 6367 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: |
6367 | case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: | ||
6368 | ret = saa7134_tda18271_hvr11x0_toggle_agc(dev, arg); | 6368 | ret = saa7134_tda18271_hvr11x0_toggle_agc(dev, arg); |
6369 | break; | 6369 | break; |
6370 | default: | 6370 | default: |
@@ -6384,8 +6384,8 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev, | |||
6384 | int ret; | 6384 | int ret; |
6385 | 6385 | ||
6386 | switch (dev->board) { | 6386 | switch (dev->board) { |
6387 | case SAA7134_BOARD_HAUPPAUGE_HVR1150: | ||
6387 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: | 6388 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: |
6388 | case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: | ||
6389 | /* tda8290 + tda18271 */ | 6389 | /* tda8290 + tda18271 */ |
6390 | ret = saa7134_tda8290_18271_callback(dev, command, arg); | 6390 | ret = saa7134_tda8290_18271_callback(dev, command, arg); |
6391 | break; | 6391 | break; |
@@ -6427,7 +6427,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data) | |||
6427 | switch (tv.model) { | 6427 | switch (tv.model) { |
6428 | case 67019: /* WinTV-HVR1110 (Retail, IR Blaster, hybrid, FM, SVid/Comp, 3.5mm audio in) */ | 6428 | case 67019: /* WinTV-HVR1110 (Retail, IR Blaster, hybrid, FM, SVid/Comp, 3.5mm audio in) */ |
6429 | case 67109: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */ | 6429 | case 67109: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */ |
6430 | case 67201: /* WinTV-HVR1120 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ | 6430 | case 67201: /* WinTV-HVR1150 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ |
6431 | case 67301: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */ | 6431 | case 67301: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */ |
6432 | case 67209: /* WinTV-HVR1110 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ | 6432 | case 67209: /* WinTV-HVR1110 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ |
6433 | case 67559: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ | 6433 | case 67559: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ |
@@ -6435,7 +6435,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data) | |||
6435 | case 67579: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM) */ | 6435 | case 67579: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM) */ |
6436 | case 67589: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */ | 6436 | case 67589: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */ |
6437 | case 67599: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */ | 6437 | case 67599: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */ |
6438 | case 67651: /* WinTV-HVR1120 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ | 6438 | case 67651: /* WinTV-HVR1150 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ |
6439 | case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ | 6439 | case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ |
6440 | break; | 6440 | break; |
6441 | default: | 6441 | default: |
@@ -6625,8 +6625,8 @@ int saa7134_board_init1(struct saa7134_dev *dev) | |||
6625 | 6625 | ||
6626 | saa_writeb (SAA7134_PRODUCTION_TEST_MODE, 0x00); | 6626 | saa_writeb (SAA7134_PRODUCTION_TEST_MODE, 0x00); |
6627 | break; | 6627 | break; |
6628 | case SAA7134_BOARD_HAUPPAUGE_HVR1150: | ||
6628 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: | 6629 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: |
6629 | case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: | ||
6630 | /* GPIO 26 high for digital, low for analog */ | 6630 | /* GPIO 26 high for digital, low for analog */ |
6631 | saa7134_set_gpio(dev, 26, 0); | 6631 | saa7134_set_gpio(dev, 26, 0); |
6632 | msleep(1); | 6632 | msleep(1); |
@@ -6891,8 +6891,8 @@ int saa7134_board_init2(struct saa7134_dev *dev) | |||
6891 | dev->name, saa7134_boards[dev->board].name); | 6891 | dev->name, saa7134_boards[dev->board].name); |
6892 | } | 6892 | } |
6893 | break; | 6893 | break; |
6894 | case SAA7134_BOARD_HAUPPAUGE_HVR1150: | ||
6894 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: | 6895 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: |
6895 | case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: | ||
6896 | hauppauge_eeprom(dev, dev->eedata+0x80); | 6896 | hauppauge_eeprom(dev, dev->eedata+0x80); |
6897 | break; | 6897 | break; |
6898 | case SAA7134_BOARD_HAUPPAUGE_HVR1110: | 6898 | case SAA7134_BOARD_HAUPPAUGE_HVR1110: |
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c index 31930f26ffc7..98f3efd1e944 100644 --- a/drivers/media/video/saa7134/saa7134-dvb.c +++ b/drivers/media/video/saa7134/saa7134-dvb.c | |||
@@ -1119,7 +1119,7 @@ static int dvb_init(struct saa7134_dev *dev) | |||
1119 | &tda827x_cfg_2) < 0) | 1119 | &tda827x_cfg_2) < 0) |
1120 | goto dettach_frontend; | 1120 | goto dettach_frontend; |
1121 | break; | 1121 | break; |
1122 | case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: | 1122 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: |
1123 | fe0->dvb.frontend = dvb_attach(tda10048_attach, | 1123 | fe0->dvb.frontend = dvb_attach(tda10048_attach, |
1124 | &hcw_tda10048_config, | 1124 | &hcw_tda10048_config, |
1125 | &dev->i2c_adap); | 1125 | &dev->i2c_adap); |
@@ -1147,7 +1147,7 @@ static int dvb_init(struct saa7134_dev *dev) | |||
1147 | &tda827x_cfg_1) < 0) | 1147 | &tda827x_cfg_1) < 0) |
1148 | goto dettach_frontend; | 1148 | goto dettach_frontend; |
1149 | break; | 1149 | break; |
1150 | case SAA7134_BOARD_HAUPPAUGE_HVR1120: | 1150 | case SAA7134_BOARD_HAUPPAUGE_HVR1150: |
1151 | fe0->dvb.frontend = dvb_attach(lgdt3305_attach, | 1151 | fe0->dvb.frontend = dvb_attach(lgdt3305_attach, |
1152 | &hcw_lgdt3305_config, | 1152 | &hcw_lgdt3305_config, |
1153 | &dev->i2c_adap); | 1153 | &dev->i2c_adap); |
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h index 82268848f26a..fb564f14887c 100644 --- a/drivers/media/video/saa7134/saa7134.h +++ b/drivers/media/video/saa7134/saa7134.h | |||
@@ -278,8 +278,8 @@ struct saa7134_format { | |||
278 | #define SAA7134_BOARD_ASUSTeK_TIGER 152 | 278 | #define SAA7134_BOARD_ASUSTeK_TIGER 152 |
279 | #define SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG 153 | 279 | #define SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG 153 |
280 | #define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154 | 280 | #define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154 |
281 | #define SAA7134_BOARD_HAUPPAUGE_HVR1120 155 | 281 | #define SAA7134_BOARD_HAUPPAUGE_HVR1150 155 |
282 | #define SAA7134_BOARD_HAUPPAUGE_HVR1110R3 156 | 282 | #define SAA7134_BOARD_HAUPPAUGE_HVR1120 156 |
283 | #define SAA7134_BOARD_AVERMEDIA_STUDIO_507UA 157 | 283 | #define SAA7134_BOARD_AVERMEDIA_STUDIO_507UA 157 |
284 | #define SAA7134_BOARD_AVERMEDIA_CARDBUS_501 158 | 284 | #define SAA7134_BOARD_AVERMEDIA_CARDBUS_501 158 |
285 | #define SAA7134_BOARD_BEHOLD_505RDS 159 | 285 | #define SAA7134_BOARD_BEHOLD_505RDS 159 |
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 0db88a53d92c..e86878deea71 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c | |||
@@ -282,27 +282,24 @@ out: | |||
282 | return ret; | 282 | return ret; |
283 | } | 283 | } |
284 | 284 | ||
285 | /* Called under spinlock_irqsave(&pcdev->lock, ...) */ | ||
285 | static void sh_mobile_ceu_videobuf_queue(struct videobuf_queue *vq, | 286 | static void sh_mobile_ceu_videobuf_queue(struct videobuf_queue *vq, |
286 | struct videobuf_buffer *vb) | 287 | struct videobuf_buffer *vb) |
287 | { | 288 | { |
288 | struct soc_camera_device *icd = vq->priv_data; | 289 | struct soc_camera_device *icd = vq->priv_data; |
289 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | 290 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); |
290 | struct sh_mobile_ceu_dev *pcdev = ici->priv; | 291 | struct sh_mobile_ceu_dev *pcdev = ici->priv; |
291 | unsigned long flags; | ||
292 | 292 | ||
293 | dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, | 293 | dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, |
294 | vb, vb->baddr, vb->bsize); | 294 | vb, vb->baddr, vb->bsize); |
295 | 295 | ||
296 | vb->state = VIDEOBUF_QUEUED; | 296 | vb->state = VIDEOBUF_QUEUED; |
297 | spin_lock_irqsave(&pcdev->lock, flags); | ||
298 | list_add_tail(&vb->queue, &pcdev->capture); | 297 | list_add_tail(&vb->queue, &pcdev->capture); |
299 | 298 | ||
300 | if (!pcdev->active) { | 299 | if (!pcdev->active) { |
301 | pcdev->active = vb; | 300 | pcdev->active = vb; |
302 | sh_mobile_ceu_capture(pcdev); | 301 | sh_mobile_ceu_capture(pcdev); |
303 | } | 302 | } |
304 | |||
305 | spin_unlock_irqrestore(&pcdev->lock, flags); | ||
306 | } | 303 | } |
307 | 304 | ||
308 | static void sh_mobile_ceu_videobuf_release(struct videobuf_queue *vq, | 305 | static void sh_mobile_ceu_videobuf_release(struct videobuf_queue *vq, |
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c index 4d6785e63455..b154bd961e3b 100644 --- a/drivers/media/video/stk-webcam.c +++ b/drivers/media/video/stk-webcam.c | |||
@@ -1050,8 +1050,8 @@ static int stk_setup_format(struct stk_camera *dev) | |||
1050 | depth = 1; | 1050 | depth = 1; |
1051 | else | 1051 | else |
1052 | depth = 2; | 1052 | depth = 2; |
1053 | while (stk_sizes[i].m != dev->vsettings.mode | 1053 | while (i < ARRAY_SIZE(stk_sizes) && |
1054 | && i < ARRAY_SIZE(stk_sizes)) | 1054 | stk_sizes[i].m != dev->vsettings.mode) |
1055 | i++; | 1055 | i++; |
1056 | if (i == ARRAY_SIZE(stk_sizes)) { | 1056 | if (i == ARRAY_SIZE(stk_sizes)) { |
1057 | STK_ERROR("Something is broken in %s\n", __func__); | 1057 | STK_ERROR("Something is broken in %s\n", __func__); |
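The operand swap above is not cosmetic: && evaluates left to right, so the bounds test has to come first or the final iteration dereferences one element past the table. In short:

    /* stk_sizes[i].m != mode && i < ARRAY_SIZE(stk_sizes) -> may read stk_sizes[ARRAY_SIZE] */
    /* i < ARRAY_SIZE(stk_sizes) && stk_sizes[i].m != mode -> index is checked before use    */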
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 89927b7aec28..04b47832fa0a 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
@@ -1845,11 +1845,29 @@ static struct usb_device_id uvc_ids[] = { | |||
1845 | .bInterfaceSubClass = 1, | 1845 | .bInterfaceSubClass = 1, |
1846 | .bInterfaceProtocol = 0, | 1846 | .bInterfaceProtocol = 0, |
1847 | .driver_info = UVC_QUIRK_STREAM_NO_FID }, | 1847 | .driver_info = UVC_QUIRK_STREAM_NO_FID }, |
1848 | /* ViMicro */ | 1848 | /* ViMicro Vega */ |
1849 | { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | 1849 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
1850 | | USB_DEVICE_ID_MATCH_INT_INFO, | ||
1851 | .idVendor = 0x0ac8, | ||
1852 | .idProduct = 0x332d, | ||
1853 | .bInterfaceClass = USB_CLASS_VIDEO, | ||
1854 | .bInterfaceSubClass = 1, | ||
1855 | .bInterfaceProtocol = 0, | ||
1856 | .driver_info = UVC_QUIRK_FIX_BANDWIDTH }, | ||
1857 | /* ViMicro - Minoru3D */ | ||
1858 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | ||
1859 | | USB_DEVICE_ID_MATCH_INT_INFO, | ||
1860 | .idVendor = 0x0ac8, | ||
1861 | .idProduct = 0x3410, | ||
1862 | .bInterfaceClass = USB_CLASS_VIDEO, | ||
1863 | .bInterfaceSubClass = 1, | ||
1864 | .bInterfaceProtocol = 0, | ||
1865 | .driver_info = UVC_QUIRK_FIX_BANDWIDTH }, | ||
1866 | /* ViMicro Venus - Minoru3D */ | ||
1867 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | ||
1850 | | USB_DEVICE_ID_MATCH_INT_INFO, | 1868 | | USB_DEVICE_ID_MATCH_INT_INFO, |
1851 | .idVendor = 0x0ac8, | 1869 | .idVendor = 0x0ac8, |
1852 | .idProduct = 0x0000, | 1870 | .idProduct = 0x3420, |
1853 | .bInterfaceClass = USB_CLASS_VIDEO, | 1871 | .bInterfaceClass = USB_CLASS_VIDEO, |
1854 | .bInterfaceSubClass = 1, | 1872 | .bInterfaceSubClass = 1, |
1855 | .bInterfaceProtocol = 0, | 1873 | .bInterfaceProtocol = 0, |
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c index f152a9903862..1ca6dff73612 100644 --- a/drivers/media/video/uvc/uvc_status.c +++ b/drivers/media/video/uvc/uvc_status.c | |||
@@ -145,8 +145,8 @@ static void uvc_status_complete(struct urb *urb) | |||
145 | break; | 145 | break; |
146 | 146 | ||
147 | default: | 147 | default: |
148 | uvc_printk(KERN_INFO, "unknown event type %u.\n", | 148 | uvc_trace(UVC_TRACE_STATUS, "Unknown status event " |
149 | dev->status[0]); | 149 | "type %u.\n", dev->status[0]); |
150 | break; | 150 | break; |
151 | } | 151 | } |
152 | } | 152 | } |
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index be64a502ea27..f2afc4e08379 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c | |||
@@ -1081,8 +1081,10 @@ static long __video_do_ioctl(struct file *file, | |||
1081 | /* Calls the specific handler */ | 1081 | /* Calls the specific handler */ |
1082 | if (ops->vidioc_g_std) | 1082 | if (ops->vidioc_g_std) |
1083 | ret = ops->vidioc_g_std(file, fh, id); | 1083 | ret = ops->vidioc_g_std(file, fh, id); |
1084 | else | 1084 | else if (vfd->current_norm) |
1085 | *id = vfd->current_norm; | 1085 | *id = vfd->current_norm; |
1086 | else | ||
1087 | ret = -EINVAL; | ||
1086 | 1088 | ||
1087 | if (!ret) | 1089 | if (!ret) |
1088 | dbgarg(cmd, "std=0x%08Lx\n", (long long unsigned)*id); | 1090 | dbgarg(cmd, "std=0x%08Lx\n", (long long unsigned)*id); |
@@ -1553,12 +1555,19 @@ static long __video_do_ioctl(struct file *file, | |||
1553 | break; | 1555 | break; |
1554 | ret = ops->vidioc_g_parm(file, fh, p); | 1556 | ret = ops->vidioc_g_parm(file, fh, p); |
1555 | } else { | 1557 | } else { |
1558 | v4l2_std_id std = vfd->current_norm; | ||
1559 | |||
1556 | if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | 1560 | if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) |
1557 | return -EINVAL; | 1561 | return -EINVAL; |
1558 | 1562 | ||
1559 | v4l2_video_std_frame_period(vfd->current_norm, | ||
1560 | &p->parm.capture.timeperframe); | ||
1561 | ret = 0; | 1563 | ret = 0; |
1564 | if (ops->vidioc_g_std) | ||
1565 | ret = ops->vidioc_g_std(file, fh, &std); | ||
1566 | else if (std == 0) | ||
1567 | ret = -EINVAL; | ||
1568 | if (ret == 0) | ||
1569 | v4l2_video_std_frame_period(std, | ||
1570 | &p->parm.capture.timeperframe); | ||
1562 | } | 1571 | } |
1563 | 1572 | ||
1564 | dbgarg(cmd, "type=%d\n", p->type); | 1573 | dbgarg(cmd, "type=%d\n", p->type); |
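The two v4l2-ioctl.c hunks change the core fallback used when a driver does not handle these ioctls itself; the hdpvr hunk above sets current_norm precisely so that it keeps working under this rule. A commented summary of the resulting order:

    /* VIDIOC_G_STD / VIDIOC_G_PARM fallback after this change:
     *  1. call the driver's vidioc_g_std handler, if it has one
     *  2. otherwise use vfd->current_norm, if the driver set it
     *  3. otherwise return -EINVAL instead of inventing a frame period
     */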
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c index fc976f42f432..2622a6e63da1 100644 --- a/drivers/media/video/zr364xx.c +++ b/drivers/media/video/zr364xx.c | |||
@@ -695,7 +695,7 @@ static int zr364xx_release(struct file *file) | |||
695 | for (i = 0; i < 2; i++) { | 695 | for (i = 0; i < 2; i++) { |
696 | err = | 696 | err = |
697 | send_control_msg(udev, 1, init[cam->method][i].value, | 697 | send_control_msg(udev, 1, init[cam->method][i].value, |
698 | 0, init[i][cam->method].bytes, | 698 | 0, init[cam->method][i].bytes, |
699 | init[cam->method][i].size); | 699 | init[cam->method][i].size); |
700 | if (err < 0) { | 700 | if (err < 0) { |
701 | dev_err(&udev->dev, "error during release sequence\n"); | 701 | dev_err(&udev->dev, "error during release sequence\n"); |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index ae5fe91867e1..10ed195c0c1c 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
736 | flash->partitioned = 1; | 736 | flash->partitioned = 1; |
737 | return add_mtd_partitions(&flash->mtd, parts, nr_parts); | 737 | return add_mtd_partitions(&flash->mtd, parts, nr_parts); |
738 | } | 738 | } |
739 | } else if (data->nr_parts) | 739 | } else if (data && data->nr_parts) |
740 | dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", | 740 | dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", |
741 | data->nr_parts, data->name); | 741 | data->nr_parts, data->name); |
742 | 742 | ||
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/drivers/mtd/maps/sbc8240.c +++ /dev/null | |||
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 7ad972229db4..0d9d4bc9c762 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -61,7 +61,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
61 | buf64 = (uint64_t *)buf; | 61 | buf64 = (uint64_t *)buf; |
62 | while (i < len/8) { | 62 | while (i < len/8) { |
63 | uint64_t x; | 63 | uint64_t x; |
64 | asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base)); | 64 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); |
65 | buf64[i++] = x; | 65 | buf64[i++] = x; |
66 | } | 66 | } |
67 | i *= 8; | 67 | i *= 8; |
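Both additions to the inline assembly matter (standard GCC inline-asm semantics; the hardware detail is that io_base is the NAND data register, so each read pops new data):

    /* "volatile": every read of the data register returns new data, so the
     *             compiler must not merge the loads or hoist one out of the
     *             while loop as a common subexpression.
     * "=&r"     : earlyclobber keeps the 64-bit destination pair from being
     *             allocated on top of the "r"(io_base) operand; ldrd with
     *             its base register inside the destination pair is
     *             unpredictable on ARM.
     */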
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index fb86cacd5bdb..1002e1882996 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
135 | int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, | 135 | int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, |
136 | size_t *retlen, uint8_t *buf) | 136 | size_t *retlen, uint8_t *buf) |
137 | { | 137 | { |
138 | loff_t mask = mtd->writesize - 1; | ||
138 | struct mtd_oob_ops ops; | 139 | struct mtd_oob_ops ops; |
139 | int res; | 140 | int res; |
140 | 141 | ||
141 | ops.mode = MTD_OOB_PLACE; | 142 | ops.mode = MTD_OOB_PLACE; |
142 | ops.ooboffs = offs & (mtd->writesize - 1); | 143 | ops.ooboffs = offs & mask; |
143 | ops.ooblen = len; | 144 | ops.ooblen = len; |
144 | ops.oobbuf = buf; | 145 | ops.oobbuf = buf; |
145 | ops.datbuf = NULL; | 146 | ops.datbuf = NULL; |
146 | 147 | ||
147 | res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); | 148 | res = mtd->read_oob(mtd, offs & ~mask, &ops); |
148 | *retlen = ops.oobretlen; | 149 | *retlen = ops.oobretlen; |
149 | return res; | 150 | return res; |
150 | } | 151 | } |
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
155 | int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, | 156 | int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, |
156 | size_t *retlen, uint8_t *buf) | 157 | size_t *retlen, uint8_t *buf) |
157 | { | 158 | { |
159 | loff_t mask = mtd->writesize - 1; | ||
158 | struct mtd_oob_ops ops; | 160 | struct mtd_oob_ops ops; |
159 | int res; | 161 | int res; |
160 | 162 | ||
161 | ops.mode = MTD_OOB_PLACE; | 163 | ops.mode = MTD_OOB_PLACE; |
162 | ops.ooboffs = offs & (mtd->writesize - 1); | 164 | ops.ooboffs = offs & mask; |
163 | ops.ooblen = len; | 165 | ops.ooblen = len; |
164 | ops.oobbuf = buf; | 166 | ops.oobbuf = buf; |
165 | ops.datbuf = NULL; | 167 | ops.datbuf = NULL; |
166 | 168 | ||
167 | res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); | 169 | res = mtd->write_oob(mtd, offs & ~mask, &ops); |
168 | *retlen = ops.oobretlen; | 170 | *retlen = ops.oobretlen; |
169 | return res; | 171 | return res; |
170 | } | 172 | } |
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
177 | static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, | 179 | static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, |
178 | size_t *retlen, uint8_t *buf, uint8_t *oob) | 180 | size_t *retlen, uint8_t *buf, uint8_t *oob) |
179 | { | 181 | { |
182 | loff_t mask = mtd->writesize - 1; | ||
180 | struct mtd_oob_ops ops; | 183 | struct mtd_oob_ops ops; |
181 | int res; | 184 | int res; |
182 | 185 | ||
183 | ops.mode = MTD_OOB_PLACE; | 186 | ops.mode = MTD_OOB_PLACE; |
184 | ops.ooboffs = offs; | 187 | ops.ooboffs = offs & mask; |
185 | ops.ooblen = mtd->oobsize; | 188 | ops.ooblen = mtd->oobsize; |
186 | ops.oobbuf = oob; | 189 | ops.oobbuf = oob; |
187 | ops.datbuf = buf; | 190 | ops.datbuf = buf; |
188 | ops.len = len; | 191 | ops.len = len; |
189 | 192 | ||
190 | res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); | 193 | res = mtd->write_oob(mtd, offs & ~mask, &ops); |
191 | *retlen = ops.retlen; | 194 | *retlen = ops.retlen; |
192 | return res; | 195 | return res; |
193 | } | 196 | } |
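A minimal sketch of the offset arithmetic the nftlcore.c hunks above consolidate, assuming mtd->writesize is a power of two; the 2048-byte page size and the sample offset are illustrative assumptions, not values from the patch:

	/* Split a raw offset into a page-aligned part and an in-page OOB offset. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t writesize = 2048;        /* assumed page size */
		uint64_t offs = 0x1234;           /* assumed raw offset */
		uint64_t mask = writesize - 1;

		/* ops.ooboffs in the patch: offset within the page */
		printf("ooboffs = 0x%llx\n", (unsigned long long)(offs & mask));   /* 0x234 */
		/* read_oob/write_oob target: page-aligned offset */
		printf("aligned = 0x%llx\n", (unsigned long long)(offs & ~mask));  /* 0x1000 */
		return 0;
	}

The same mask is now also applied to ops.ooboffs in nftl_write(), which previously passed the unmasked offset.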
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 8b3e76c1cf52..34e776c5f06b 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -1383,6 +1383,7 @@ static int cxgb_open(struct net_device *dev) | |||
1383 | if (!other_ports) | 1383 | if (!other_ports) |
1384 | schedule_chk_task(adapter); | 1384 | schedule_chk_task(adapter); |
1385 | 1385 | ||
1386 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id); | ||
1386 | return 0; | 1387 | return 0; |
1387 | } | 1388 | } |
1388 | 1389 | ||
@@ -1415,6 +1416,7 @@ static int cxgb_close(struct net_device *dev) | |||
1415 | if (!adapter->open_device_map) | 1416 | if (!adapter->open_device_map) |
1416 | cxgb_down(adapter); | 1417 | cxgb_down(adapter); |
1417 | 1418 | ||
1419 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); | ||
1418 | return 0; | 1420 | return 0; |
1419 | } | 1421 | } |
1420 | 1422 | ||
@@ -2814,7 +2816,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset) | |||
2814 | 2816 | ||
2815 | if (is_offload(adapter) && | 2817 | if (is_offload(adapter) && |
2816 | test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | 2818 | test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { |
2817 | cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); | 2819 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); |
2818 | offload_close(&adapter->tdev); | 2820 | offload_close(&adapter->tdev); |
2819 | } | 2821 | } |
2820 | 2822 | ||
@@ -2879,7 +2881,7 @@ static void t3_resume_ports(struct adapter *adapter) | |||
2879 | } | 2881 | } |
2880 | 2882 | ||
2881 | if (is_offload(adapter) && !ofld_disable) | 2883 | if (is_offload(adapter) && !ofld_disable) |
2882 | cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); | 2884 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); |
2883 | } | 2885 | } |
2884 | 2886 | ||
2885 | /* | 2887 | /* |
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index f9f54b57b28c..75064eea1d87 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev) | |||
153 | mutex_unlock(&cxgb3_db_lock); | 153 | mutex_unlock(&cxgb3_db_lock); |
154 | } | 154 | } |
155 | 155 | ||
156 | void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error) | 156 | void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) |
157 | { | 157 | { |
158 | struct cxgb3_client *client; | 158 | struct cxgb3_client *client; |
159 | 159 | ||
160 | mutex_lock(&cxgb3_db_lock); | 160 | mutex_lock(&cxgb3_db_lock); |
161 | list_for_each_entry(client, &client_list, client_list) { | 161 | list_for_each_entry(client, &client_list, client_list) { |
162 | if (client->err_handler) | 162 | if (client->event_handler) |
163 | client->err_handler(tdev, status, error); | 163 | client->event_handler(tdev, event, port); |
164 | } | 164 | } |
165 | mutex_unlock(&cxgb3_db_lock); | 165 | mutex_unlock(&cxgb3_db_lock); |
166 | } | 166 | } |
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h index 55945f422aec..670aa62042da 100644 --- a/drivers/net/cxgb3/cxgb3_offload.h +++ b/drivers/net/cxgb3/cxgb3_offload.h | |||
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client); | |||
64 | void cxgb3_unregister_client(struct cxgb3_client *client); | 64 | void cxgb3_unregister_client(struct cxgb3_client *client); |
65 | void cxgb3_add_clients(struct t3cdev *tdev); | 65 | void cxgb3_add_clients(struct t3cdev *tdev); |
66 | void cxgb3_remove_clients(struct t3cdev *tdev); | 66 | void cxgb3_remove_clients(struct t3cdev *tdev); |
67 | void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error); | 67 | void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port); |
68 | 68 | ||
69 | typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, | 69 | typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, |
70 | struct sk_buff *skb, void *ctx); | 70 | struct sk_buff *skb, void *ctx); |
71 | 71 | ||
72 | enum { | 72 | enum { |
73 | OFFLOAD_STATUS_UP, | 73 | OFFLOAD_STATUS_UP, |
74 | OFFLOAD_STATUS_DOWN | 74 | OFFLOAD_STATUS_DOWN, |
75 | OFFLOAD_PORT_DOWN, | ||
76 | OFFLOAD_PORT_UP | ||
75 | }; | 77 | }; |
76 | 78 | ||
77 | struct cxgb3_client { | 79 | struct cxgb3_client { |
@@ -82,7 +84,7 @@ struct cxgb3_client { | |||
82 | int (*redirect)(void *ctx, struct dst_entry *old, | 84 | int (*redirect)(void *ctx, struct dst_entry *old, |
83 | struct dst_entry *new, struct l2t_entry *l2t); | 85 | struct dst_entry *new, struct l2t_entry *l2t); |
84 | struct list_head client_list; | 86 | struct list_head client_list; |
85 | void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error); | 87 | void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port); |
86 | }; | 88 | }; |
87 | 89 | ||
88 | /* | 90 | /* |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 772104c5ed26..1e5289ffef6f 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -490,7 +490,7 @@ static int gfar_remove(struct of_device *ofdev) | |||
490 | 490 | ||
491 | dev_set_drvdata(&ofdev->dev, NULL); | 491 | dev_set_drvdata(&ofdev->dev, NULL); |
492 | 492 | ||
493 | unregister_netdev(dev); | 493 | unregister_netdev(priv->ndev); |
494 | iounmap(priv->regs); | 494 | iounmap(priv->regs); |
495 | free_netdev(priv->ndev); | 495 | free_netdev(priv->ndev); |
496 | 496 | ||
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index ac57b6a42c6e..ccfe276943f0 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c | |||
@@ -34,7 +34,6 @@ | |||
34 | * SOFTWARE. | 34 | * SOFTWARE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/hardirq.h> | 37 | #include <linux/hardirq.h> |
39 | 38 | ||
40 | #include <linux/mlx4/cmd.h> | 39 | #include <linux/mlx4/cmd.h> |
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index b9ceddde46c0..bffb7995cb70 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
36 | #include <linux/mm.h> | 35 | #include <linux/mm.h> |
37 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
@@ -42,6 +41,10 @@ | |||
42 | #include "fw.h" | 41 | #include "fw.h" |
43 | 42 | ||
44 | enum { | 43 | enum { |
44 | MLX4_IRQNAME_SIZE = 64 | ||
45 | }; | ||
46 | |||
47 | enum { | ||
45 | MLX4_NUM_ASYNC_EQE = 0x100, | 48 | MLX4_NUM_ASYNC_EQE = 0x100, |
46 | MLX4_NUM_SPARE_EQE = 0x80, | 49 | MLX4_NUM_SPARE_EQE = 0x80, |
47 | MLX4_EQ_ENTRY_SIZE = 0x20 | 50 | MLX4_EQ_ENTRY_SIZE = 0x20 |
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev) | |||
526 | iounmap(priv->clr_base); | 529 | iounmap(priv->clr_base); |
527 | } | 530 | } |
528 | 531 | ||
529 | int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) | ||
530 | { | ||
531 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
532 | int ret; | ||
533 | |||
534 | /* | ||
535 | * We assume that mapping one page is enough for the whole EQ | ||
536 | * context table. This is fine with all current HCAs, because | ||
537 | * we only use 32 EQs and each EQ uses 64 bytes of context | ||
538 | * memory, or 1 KB total. | ||
539 | */ | ||
540 | priv->eq_table.icm_virt = icm_virt; | ||
541 | priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER); | ||
542 | if (!priv->eq_table.icm_page) | ||
543 | return -ENOMEM; | ||
544 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, | ||
545 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
546 | if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { | ||
547 | __free_page(priv->eq_table.icm_page); | ||
548 | return -ENOMEM; | ||
549 | } | ||
550 | |||
551 | ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt); | ||
552 | if (ret) { | ||
553 | pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, | ||
554 | PCI_DMA_BIDIRECTIONAL); | ||
555 | __free_page(priv->eq_table.icm_page); | ||
556 | } | ||
557 | |||
558 | return ret; | ||
559 | } | ||
560 | |||
561 | void mlx4_unmap_eq_icm(struct mlx4_dev *dev) | ||
562 | { | ||
563 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
564 | |||
565 | mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1); | ||
566 | pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, | ||
567 | PCI_DMA_BIDIRECTIONAL); | ||
568 | __free_page(priv->eq_table.icm_page); | ||
569 | } | ||
570 | |||
571 | int mlx4_alloc_eq_table(struct mlx4_dev *dev) | 532 | int mlx4_alloc_eq_table(struct mlx4_dev *dev) |
572 | { | 533 | { |
573 | struct mlx4_priv *priv = mlx4_priv(dev); | 534 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
615 | priv->eq_table.clr_int = priv->clr_base + | 576 | priv->eq_table.clr_int = priv->clr_base + |
616 | (priv->eq_table.inta_pin < 32 ? 4 : 0); | 577 | (priv->eq_table.inta_pin < 32 ? 4 : 0); |
617 | 578 | ||
618 | priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL); | 579 | priv->eq_table.irq_names = |
580 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), | ||
581 | GFP_KERNEL); | ||
619 | if (!priv->eq_table.irq_names) { | 582 | if (!priv->eq_table.irq_names) { |
620 | err = -ENOMEM; | 583 | err = -ENOMEM; |
621 | goto err_out_bitmap; | 584 | goto err_out_bitmap; |
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
638 | goto err_out_comp; | 601 | goto err_out_comp; |
639 | 602 | ||
640 | if (dev->flags & MLX4_FLAG_MSI_X) { | 603 | if (dev->flags & MLX4_FLAG_MSI_X) { |
641 | static const char async_eq_name[] = "mlx4-async"; | ||
642 | const char *eq_name; | 604 | const char *eq_name; |
643 | 605 | ||
644 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { | 606 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { |
645 | if (i < dev->caps.num_comp_vectors) { | 607 | if (i < dev->caps.num_comp_vectors) { |
646 | snprintf(priv->eq_table.irq_names + i * 16, 16, | 608 | snprintf(priv->eq_table.irq_names + |
647 | "mlx4-comp-%d", i); | 609 | i * MLX4_IRQNAME_SIZE, |
648 | eq_name = priv->eq_table.irq_names + i * 16; | 610 | MLX4_IRQNAME_SIZE, |
649 | } else | 611 | "mlx4-comp-%d@pci:%s", i, |
650 | eq_name = async_eq_name; | 612 | pci_name(dev->pdev)); |
613 | } else { | ||
614 | snprintf(priv->eq_table.irq_names + | ||
615 | i * MLX4_IRQNAME_SIZE, | ||
616 | MLX4_IRQNAME_SIZE, | ||
617 | "mlx4-async@pci:%s", | ||
618 | pci_name(dev->pdev)); | ||
619 | } | ||
651 | 620 | ||
621 | eq_name = priv->eq_table.irq_names + | ||
622 | i * MLX4_IRQNAME_SIZE; | ||
652 | err = request_irq(priv->eq_table.eq[i].irq, | 623 | err = request_irq(priv->eq_table.eq[i].irq, |
653 | mlx4_msi_x_interrupt, 0, eq_name, | 624 | mlx4_msi_x_interrupt, 0, eq_name, |
654 | priv->eq_table.eq + i); | 625 | priv->eq_table.eq + i); |
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
658 | priv->eq_table.eq[i].have_irq = 1; | 629 | priv->eq_table.eq[i].have_irq = 1; |
659 | } | 630 | } |
660 | } else { | 631 | } else { |
632 | snprintf(priv->eq_table.irq_names, | ||
633 | MLX4_IRQNAME_SIZE, | ||
634 | DRV_NAME "@pci:%s", | ||
635 | pci_name(dev->pdev)); | ||
661 | err = request_irq(dev->pdev->irq, mlx4_interrupt, | 636 | err = request_irq(dev->pdev->irq, mlx4_interrupt, |
662 | IRQF_SHARED, DRV_NAME, dev); | 637 | IRQF_SHARED, priv->eq_table.irq_names, dev); |
663 | if (err) | 638 | if (err) |
664 | goto err_out_async; | 639 | goto err_out_async; |
665 | 640 | ||
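A small user-space sketch of the IRQ naming scheme the eq.c hunk switches to: one fixed-size slot per vector, so each name has a stable address to hand to request_irq() and now embeds the PCI device name. The slot size mirrors MLX4_IRQNAME_SIZE; the vector count and PCI address below are illustrative assumptions:

	#include <stdio.h>
	#include <stdlib.h>

	#define IRQNAME_SIZE 64                     /* mirrors MLX4_IRQNAME_SIZE */

	int main(void)
	{
		int num_comp_vectors = 4;               /* assumed */
		const char *pdev = "0000:06:00.0";      /* assumed PCI address */
		char *names = malloc(IRQNAME_SIZE * (num_comp_vectors + 1));
		int i;

		if (!names)
			return 1;
		for (i = 0; i < num_comp_vectors; i++)
			snprintf(names + i * IRQNAME_SIZE, IRQNAME_SIZE,
				 "mlx4-comp-%d@pci:%s", i, pdev);
		snprintf(names + num_comp_vectors * IRQNAME_SIZE, IRQNAME_SIZE,
			 "mlx4-async@pci:%s", pdev);
		for (i = 0; i <= num_comp_vectors; i++)
			puts(names + i * IRQNAME_SIZE);
		free(names);
		return 0;
	}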
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index baf4bf66062c..04b382fcb8c8 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
36 | #include <linux/mm.h> | 35 | #include <linux/mm.h> |
37 | #include <linux/scatterlist.h> | 36 | #include <linux/scatterlist.h> |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index dac621b1e9fc..3dd481e77f92 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
525 | goto err_unmap_aux; | 525 | goto err_unmap_aux; |
526 | } | 526 | } |
527 | 527 | ||
528 | err = mlx4_map_eq_icm(dev, init_hca->eqc_base); | 528 | err = mlx4_init_icm_table(dev, &priv->eq_table.table, |
529 | init_hca->eqc_base, dev_cap->eqc_entry_sz, | ||
530 | dev->caps.num_eqs, dev->caps.num_eqs, | ||
531 | 0, 0); | ||
529 | if (err) { | 532 | if (err) { |
530 | mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); | 533 | mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); |
531 | goto err_unmap_cmpt; | 534 | goto err_unmap_cmpt; |
@@ -668,7 +671,7 @@ err_unmap_mtt: | |||
668 | mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); | 671 | mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); |
669 | 672 | ||
670 | err_unmap_eq: | 673 | err_unmap_eq: |
671 | mlx4_unmap_eq_icm(dev); | 674 | mlx4_cleanup_icm_table(dev, &priv->eq_table.table); |
672 | 675 | ||
673 | err_unmap_cmpt: | 676 | err_unmap_cmpt: |
674 | mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); | 677 | mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); |
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev) | |||
698 | mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); | 701 | mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); |
699 | mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); | 702 | mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); |
700 | mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); | 703 | mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); |
704 | mlx4_cleanup_icm_table(dev, &priv->eq_table.table); | ||
701 | mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); | 705 | mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); |
702 | mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); | 706 | mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); |
703 | mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); | 707 | mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); |
704 | mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); | 708 | mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); |
705 | mlx4_unmap_eq_icm(dev); | ||
706 | 709 | ||
707 | mlx4_UNMAP_ICM_AUX(dev); | 710 | mlx4_UNMAP_ICM_AUX(dev); |
708 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); | 711 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); |
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
786 | return 0; | 789 | return 0; |
787 | 790 | ||
788 | err_close: | 791 | err_close: |
789 | mlx4_close_hca(dev); | 792 | mlx4_CLOSE_HCA(dev, 0); |
790 | 793 | ||
791 | err_free_icm: | 794 | err_free_icm: |
792 | mlx4_free_icms(dev); | 795 | mlx4_free_icms(dev); |
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1070 | goto err_disable_pdev; | 1073 | goto err_disable_pdev; |
1071 | } | 1074 | } |
1072 | 1075 | ||
1073 | err = pci_request_region(pdev, 0, DRV_NAME); | 1076 | err = pci_request_regions(pdev, DRV_NAME); |
1074 | if (err) { | 1077 | if (err) { |
1075 | dev_err(&pdev->dev, "Cannot request control region, aborting.\n"); | 1078 | dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); |
1076 | goto err_disable_pdev; | 1079 | goto err_disable_pdev; |
1077 | } | 1080 | } |
1078 | 1081 | ||
1079 | err = pci_request_region(pdev, 2, DRV_NAME); | ||
1080 | if (err) { | ||
1081 | dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n"); | ||
1082 | goto err_release_bar0; | ||
1083 | } | ||
1084 | |||
1085 | pci_set_master(pdev); | 1082 | pci_set_master(pdev); |
1086 | 1083 | ||
1087 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | 1084 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1090 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 1087 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
1091 | if (err) { | 1088 | if (err) { |
1092 | dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); | 1089 | dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); |
1093 | goto err_release_bar2; | 1090 | goto err_release_regions; |
1094 | } | 1091 | } |
1095 | } | 1092 | } |
1096 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 1093 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1101 | if (err) { | 1098 | if (err) { |
1102 | dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " | 1099 | dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " |
1103 | "aborting.\n"); | 1100 | "aborting.\n"); |
1104 | goto err_release_bar2; | 1101 | goto err_release_regions; |
1105 | } | 1102 | } |
1106 | } | 1103 | } |
1107 | 1104 | ||
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1110 | dev_err(&pdev->dev, "Device struct alloc failed, " | 1107 | dev_err(&pdev->dev, "Device struct alloc failed, " |
1111 | "aborting.\n"); | 1108 | "aborting.\n"); |
1112 | err = -ENOMEM; | 1109 | err = -ENOMEM; |
1113 | goto err_release_bar2; | 1110 | goto err_release_regions; |
1114 | } | 1111 | } |
1115 | 1112 | ||
1116 | dev = &priv->dev; | 1113 | dev = &priv->dev; |
@@ -1205,11 +1202,8 @@ err_cmd: | |||
1205 | err_free_dev: | 1202 | err_free_dev: |
1206 | kfree(priv); | 1203 | kfree(priv); |
1207 | 1204 | ||
1208 | err_release_bar2: | 1205 | err_release_regions: |
1209 | pci_release_region(pdev, 2); | 1206 | pci_release_regions(pdev); |
1210 | |||
1211 | err_release_bar0: | ||
1212 | pci_release_region(pdev, 0); | ||
1213 | 1207 | ||
1214 | err_disable_pdev: | 1208 | err_disable_pdev: |
1215 | pci_disable_device(pdev); | 1209 | pci_disable_device(pdev); |
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
1265 | pci_disable_msix(pdev); | 1259 | pci_disable_msix(pdev); |
1266 | 1260 | ||
1267 | kfree(priv); | 1261 | kfree(priv); |
1268 | pci_release_region(pdev, 2); | 1262 | pci_release_regions(pdev); |
1269 | pci_release_region(pdev, 0); | ||
1270 | pci_disable_device(pdev); | 1263 | pci_disable_device(pdev); |
1271 | pci_set_drvdata(pdev, NULL); | 1264 | pci_set_drvdata(pdev, NULL); |
1272 | } | 1265 | } |
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index 6053c357a470..5ccbce9866fe 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/string.h> | 34 | #include <linux/string.h> |
36 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
37 | 36 | ||
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 5bd79c2b184f..bc72d6e4919b 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -205,9 +205,7 @@ struct mlx4_eq_table { | |||
205 | void __iomem **uar_map; | 205 | void __iomem **uar_map; |
206 | u32 clr_mask; | 206 | u32 clr_mask; |
207 | struct mlx4_eq *eq; | 207 | struct mlx4_eq *eq; |
208 | u64 icm_virt; | 208 | struct mlx4_icm_table table; |
209 | struct page *icm_page; | ||
210 | dma_addr_t icm_dma; | ||
211 | struct mlx4_icm_table cmpt_table; | 209 | struct mlx4_icm_table cmpt_table; |
212 | int have_irq; | 210 | int have_irq; |
213 | u8 inta_pin; | 211 | u8 inta_pin; |
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
373 | struct mlx4_dev_cap *dev_cap, | 371 | struct mlx4_dev_cap *dev_cap, |
374 | struct mlx4_init_hca_param *init_hca); | 372 | struct mlx4_init_hca_param *init_hca); |
375 | 373 | ||
376 | int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt); | ||
377 | void mlx4_unmap_eq_icm(struct mlx4_dev *dev); | ||
378 | |||
379 | int mlx4_cmd_init(struct mlx4_dev *dev); | 374 | int mlx4_cmd_init(struct mlx4_dev *dev); |
380 | void mlx4_cmd_cleanup(struct mlx4_dev *dev); | 375 | void mlx4_cmd_cleanup(struct mlx4_dev *dev); |
381 | void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); | 376 | void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); |
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index f96948be0a44..ca7ab8e7b4cc 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -32,7 +32,6 @@ | |||
32 | * SOFTWARE. | 32 | * SOFTWARE. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/errno.h> | 35 | #include <linux/errno.h> |
37 | 36 | ||
38 | #include <linux/mlx4/cmd.h> | 37 | #include <linux/mlx4/cmd.h> |
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index 26d1a7a9e375..c4988d6bd5b2 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
36 | 35 | ||
37 | #include <asm/page.h> | 36 | #include <asm/page.h> |
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index bd22df95adf9..ca25b9dc8378 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c | |||
@@ -32,8 +32,6 @@ | |||
32 | * SOFTWARE. | 32 | * SOFTWARE. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/init.h> | ||
36 | |||
37 | #include "mlx4.h" | 35 | #include "mlx4.h" |
38 | #include "fw.h" | 36 | #include "fw.h" |
39 | 37 | ||
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index 1c565ef8d179..42ab9fc01d3e 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c | |||
@@ -33,8 +33,6 @@ | |||
33 | * SOFTWARE. | 33 | * SOFTWARE. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/init.h> | ||
37 | |||
38 | #include <linux/mlx4/cmd.h> | 36 | #include <linux/mlx4/cmd.h> |
39 | #include <linux/mlx4/qp.h> | 37 | #include <linux/mlx4/qp.h> |
40 | 38 | ||
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c index 3951b884c0fb..e5741dab3825 100644 --- a/drivers/net/mlx4/reset.c +++ b/drivers/net/mlx4/reset.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
36 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
37 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index fe9f218691f5..1377d0dc8f1f 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c | |||
@@ -31,8 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/init.h> | ||
35 | |||
36 | #include <linux/mlx4/cmd.h> | 34 | #include <linux/mlx4/cmd.h> |
37 | 35 | ||
38 | #include "mlx4.h" | 36 | #include "mlx4.h" |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 589a44acdc76..3f5d28851aa2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -127,17 +127,10 @@ static inline struct tun_sock *tun_sk(struct sock *sk) | |||
127 | static int tun_attach(struct tun_struct *tun, struct file *file) | 127 | static int tun_attach(struct tun_struct *tun, struct file *file) |
128 | { | 128 | { |
129 | struct tun_file *tfile = file->private_data; | 129 | struct tun_file *tfile = file->private_data; |
130 | const struct cred *cred = current_cred(); | ||
131 | int err; | 130 | int err; |
132 | 131 | ||
133 | ASSERT_RTNL(); | 132 | ASSERT_RTNL(); |
134 | 133 | ||
135 | /* Check permissions */ | ||
136 | if (((tun->owner != -1 && cred->euid != tun->owner) || | ||
137 | (tun->group != -1 && !in_egroup_p(tun->group))) && | ||
138 | !capable(CAP_NET_ADMIN)) | ||
139 | return -EPERM; | ||
140 | |||
141 | netif_tx_lock_bh(tun->dev); | 134 | netif_tx_lock_bh(tun->dev); |
142 | 135 | ||
143 | err = -EINVAL; | 136 | err = -EINVAL; |
@@ -926,6 +919,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
926 | 919 | ||
927 | dev = __dev_get_by_name(net, ifr->ifr_name); | 920 | dev = __dev_get_by_name(net, ifr->ifr_name); |
928 | if (dev) { | 921 | if (dev) { |
922 | const struct cred *cred = current_cred(); | ||
923 | |||
929 | if (ifr->ifr_flags & IFF_TUN_EXCL) | 924 | if (ifr->ifr_flags & IFF_TUN_EXCL) |
930 | return -EBUSY; | 925 | return -EBUSY; |
931 | if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) | 926 | if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) |
@@ -935,6 +930,14 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
935 | else | 930 | else |
936 | return -EINVAL; | 931 | return -EINVAL; |
937 | 932 | ||
933 | if (((tun->owner != -1 && cred->euid != tun->owner) || | ||
934 | (tun->group != -1 && !in_egroup_p(tun->group))) && | ||
935 | !capable(CAP_NET_ADMIN)) | ||
936 | return -EPERM; | ||
937 | err = security_tun_dev_attach(tun->socket.sk); | ||
938 | if (err < 0) | ||
939 | return err; | ||
940 | |||
938 | err = tun_attach(tun, file); | 941 | err = tun_attach(tun, file); |
939 | if (err < 0) | 942 | if (err < 0) |
940 | return err; | 943 | return err; |
@@ -947,6 +950,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
947 | 950 | ||
948 | if (!capable(CAP_NET_ADMIN)) | 951 | if (!capable(CAP_NET_ADMIN)) |
949 | return -EPERM; | 952 | return -EPERM; |
953 | err = security_tun_dev_create(); | ||
954 | if (err < 0) | ||
955 | return err; | ||
950 | 956 | ||
951 | /* Set dev type */ | 957 | /* Set dev type */ |
952 | if (ifr->ifr_flags & IFF_TUN) { | 958 | if (ifr->ifr_flags & IFF_TUN) { |
@@ -988,6 +994,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
988 | 994 | ||
989 | container_of(sk, struct tun_sock, sk)->tun = tun; | 995 | container_of(sk, struct tun_sock, sk)->tun = tun; |
990 | 996 | ||
997 | security_tun_dev_post_create(sk); | ||
998 | |||
991 | tun_net_init(dev); | 999 | tun_net_init(dev); |
992 | 1000 | ||
993 | if (strchr(dev->name, '%')) { | 1001 | if (strchr(dev->name, '%')) { |
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 242257b19441..a7aae24f2889 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/oprofile.h> | 23 | #include <linux/oprofile.h> |
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
26 | 25 | ||
27 | #include "event_buffer.h" | 26 | #include "event_buffer.h" |
@@ -407,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val) | |||
407 | return op_cpu_buffer_add_data(entry, val); | 406 | return op_cpu_buffer_add_data(entry, val); |
408 | } | 407 | } |
409 | 408 | ||
409 | int oprofile_add_data64(struct op_entry *entry, u64 val) | ||
410 | { | ||
411 | if (!entry->event) | ||
412 | return 0; | ||
413 | if (op_cpu_buffer_get_size(entry) < 2) | ||
414 | /* | ||
415 | * the function returns 0 to indicate a too small | ||
416 | * buffer, even if there is some space left | ||
417 | */ | ||
418 | return 0; | ||
419 | if (!op_cpu_buffer_add_data(entry, (u32)val)) | ||
420 | return 0; | ||
421 | return op_cpu_buffer_add_data(entry, (u32)(val >> 32)); | ||
422 | } | ||
423 | |||
410 | int oprofile_write_commit(struct op_entry *entry) | 424 | int oprofile_write_commit(struct op_entry *entry) |
411 | { | 425 | { |
412 | if (!entry->event) | 426 | if (!entry->event) |
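oprofile_add_data64() above stores the low 32 bits first and the high 32 bits second, and refuses the write when fewer than two slots remain. A minimal sketch of the split and the matching reassembly a consumer would perform, independent of the op_cpu_buffer API:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 0x1122334455667788ULL;   /* assumed sample value */
		uint32_t lo = (uint32_t)val;            /* stored first */
		uint32_t hi = (uint32_t)(val >> 32);    /* stored second */
		uint64_t back = (uint64_t)lo | ((uint64_t)hi << 32);

		printf("round-trips: %s\n", back == val ? "yes" : "no");
		return 0;
	}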
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index 3cffce90f82a..dc8a0428260d 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/oprofile.h> | 13 | #include <linux/oprofile.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/workqueue.h> | ||
16 | #include <linux/time.h> | ||
15 | #include <asm/mutex.h> | 17 | #include <asm/mutex.h> |
16 | 18 | ||
17 | #include "oprof.h" | 19 | #include "oprof.h" |
@@ -87,6 +89,69 @@ out: | |||
87 | return err; | 89 | return err; |
88 | } | 90 | } |
89 | 91 | ||
92 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
93 | |||
94 | static void switch_worker(struct work_struct *work); | ||
95 | static DECLARE_DELAYED_WORK(switch_work, switch_worker); | ||
96 | |||
97 | static void start_switch_worker(void) | ||
98 | { | ||
99 | if (oprofile_ops.switch_events) | ||
100 | schedule_delayed_work(&switch_work, oprofile_time_slice); | ||
101 | } | ||
102 | |||
103 | static void stop_switch_worker(void) | ||
104 | { | ||
105 | cancel_delayed_work_sync(&switch_work); | ||
106 | } | ||
107 | |||
108 | static void switch_worker(struct work_struct *work) | ||
109 | { | ||
110 | if (oprofile_ops.switch_events()) | ||
111 | return; | ||
112 | |||
113 | atomic_inc(&oprofile_stats.multiplex_counter); | ||
114 | start_switch_worker(); | ||
115 | } | ||
116 | |||
117 | /* User inputs in ms, converts to jiffies */ | ||
118 | int oprofile_set_timeout(unsigned long val_msec) | ||
119 | { | ||
120 | int err = 0; | ||
121 | unsigned long time_slice; | ||
122 | |||
123 | mutex_lock(&start_mutex); | ||
124 | |||
125 | if (oprofile_started) { | ||
126 | err = -EBUSY; | ||
127 | goto out; | ||
128 | } | ||
129 | |||
130 | if (!oprofile_ops.switch_events) { | ||
131 | err = -EINVAL; | ||
132 | goto out; | ||
133 | } | ||
134 | |||
135 | time_slice = msecs_to_jiffies(val_msec); | ||
136 | if (time_slice == MAX_JIFFY_OFFSET) { | ||
137 | err = -EINVAL; | ||
138 | goto out; | ||
139 | } | ||
140 | |||
141 | oprofile_time_slice = time_slice; | ||
142 | |||
143 | out: | ||
144 | mutex_unlock(&start_mutex); | ||
145 | return err; | ||
146 | |||
147 | } | ||
148 | |||
149 | #else | ||
150 | |||
151 | static inline void start_switch_worker(void) { } | ||
152 | static inline void stop_switch_worker(void) { } | ||
153 | |||
154 | #endif | ||
90 | 155 | ||
91 | /* Actually start profiling (echo 1>/dev/oprofile/enable) */ | 156 | /* Actually start profiling (echo 1>/dev/oprofile/enable) */ |
92 | int oprofile_start(void) | 157 | int oprofile_start(void) |
@@ -108,6 +173,8 @@ int oprofile_start(void) | |||
108 | if ((err = oprofile_ops.start())) | 173 | if ((err = oprofile_ops.start())) |
109 | goto out; | 174 | goto out; |
110 | 175 | ||
176 | start_switch_worker(); | ||
177 | |||
111 | oprofile_started = 1; | 178 | oprofile_started = 1; |
112 | out: | 179 | out: |
113 | mutex_unlock(&start_mutex); | 180 | mutex_unlock(&start_mutex); |
@@ -123,6 +190,9 @@ void oprofile_stop(void) | |||
123 | goto out; | 190 | goto out; |
124 | oprofile_ops.stop(); | 191 | oprofile_ops.stop(); |
125 | oprofile_started = 0; | 192 | oprofile_started = 0; |
193 | |||
194 | stop_switch_worker(); | ||
195 | |||
126 | /* wake up the daemon to read what remains */ | 196 | /* wake up the daemon to read what remains */ |
127 | wake_up_buffer_waiter(); | 197 | wake_up_buffer_waiter(); |
128 | out: | 198 | out: |
@@ -155,7 +225,6 @@ post_sync: | |||
155 | mutex_unlock(&start_mutex); | 225 | mutex_unlock(&start_mutex); |
156 | } | 226 | } |
157 | 227 | ||
158 | |||
159 | int oprofile_set_backtrace(unsigned long val) | 228 | int oprofile_set_backtrace(unsigned long val) |
160 | { | 229 | { |
161 | int err = 0; | 230 | int err = 0; |
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index c288d3c24b50..cb92f5c98c1a 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h | |||
@@ -24,6 +24,8 @@ struct oprofile_operations; | |||
24 | extern unsigned long oprofile_buffer_size; | 24 | extern unsigned long oprofile_buffer_size; |
25 | extern unsigned long oprofile_cpu_buffer_size; | 25 | extern unsigned long oprofile_cpu_buffer_size; |
26 | extern unsigned long oprofile_buffer_watershed; | 26 | extern unsigned long oprofile_buffer_watershed; |
27 | extern unsigned long oprofile_time_slice; | ||
28 | |||
27 | extern struct oprofile_operations oprofile_ops; | 29 | extern struct oprofile_operations oprofile_ops; |
28 | extern unsigned long oprofile_started; | 30 | extern unsigned long oprofile_started; |
29 | extern unsigned long oprofile_backtrace_depth; | 31 | extern unsigned long oprofile_backtrace_depth; |
@@ -35,5 +37,6 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root); | |||
35 | void oprofile_timer_init(struct oprofile_operations *ops); | 37 | void oprofile_timer_init(struct oprofile_operations *ops); |
36 | 38 | ||
37 | int oprofile_set_backtrace(unsigned long depth); | 39 | int oprofile_set_backtrace(unsigned long depth); |
40 | int oprofile_set_timeout(unsigned long time); | ||
38 | 41 | ||
39 | #endif /* OPROF_H */ | 42 | #endif /* OPROF_H */ |
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index 5d36ffc30dd5..bbd7516e0869 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/fs.h> | 10 | #include <linux/fs.h> |
11 | #include <linux/oprofile.h> | 11 | #include <linux/oprofile.h> |
12 | #include <linux/jiffies.h> | ||
12 | 13 | ||
13 | #include "event_buffer.h" | 14 | #include "event_buffer.h" |
14 | #include "oprofile_stats.h" | 15 | #include "oprofile_stats.h" |
@@ -17,10 +18,51 @@ | |||
17 | #define BUFFER_SIZE_DEFAULT 131072 | 18 | #define BUFFER_SIZE_DEFAULT 131072 |
18 | #define CPU_BUFFER_SIZE_DEFAULT 8192 | 19 | #define CPU_BUFFER_SIZE_DEFAULT 8192 |
19 | #define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ | 20 | #define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ |
21 | #define TIME_SLICE_DEFAULT 1 | ||
20 | 22 | ||
21 | unsigned long oprofile_buffer_size; | 23 | unsigned long oprofile_buffer_size; |
22 | unsigned long oprofile_cpu_buffer_size; | 24 | unsigned long oprofile_cpu_buffer_size; |
23 | unsigned long oprofile_buffer_watershed; | 25 | unsigned long oprofile_buffer_watershed; |
26 | unsigned long oprofile_time_slice; | ||
27 | |||
28 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
29 | |||
30 | static ssize_t timeout_read(struct file *file, char __user *buf, | ||
31 | size_t count, loff_t *offset) | ||
32 | { | ||
33 | return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice), | ||
34 | buf, count, offset); | ||
35 | } | ||
36 | |||
37 | |||
38 | static ssize_t timeout_write(struct file *file, char const __user *buf, | ||
39 | size_t count, loff_t *offset) | ||
40 | { | ||
41 | unsigned long val; | ||
42 | int retval; | ||
43 | |||
44 | if (*offset) | ||
45 | return -EINVAL; | ||
46 | |||
47 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
48 | if (retval) | ||
49 | return retval; | ||
50 | |||
51 | retval = oprofile_set_timeout(val); | ||
52 | |||
53 | if (retval) | ||
54 | return retval; | ||
55 | return count; | ||
56 | } | ||
57 | |||
58 | |||
59 | static const struct file_operations timeout_fops = { | ||
60 | .read = timeout_read, | ||
61 | .write = timeout_write, | ||
62 | }; | ||
63 | |||
64 | #endif | ||
65 | |||
24 | 66 | ||
25 | static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) | 67 | static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) |
26 | { | 68 | { |
@@ -129,6 +171,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root) | |||
129 | oprofile_buffer_size = BUFFER_SIZE_DEFAULT; | 171 | oprofile_buffer_size = BUFFER_SIZE_DEFAULT; |
130 | oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; | 172 | oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; |
131 | oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; | 173 | oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; |
174 | oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT); | ||
132 | 175 | ||
133 | oprofilefs_create_file(sb, root, "enable", &enable_fops); | 176 | oprofilefs_create_file(sb, root, "enable", &enable_fops); |
134 | oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); | 177 | oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); |
@@ -139,6 +182,9 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root) | |||
139 | oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); | 182 | oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); |
140 | oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); | 183 | oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); |
141 | oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); | 184 | oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); |
185 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
186 | oprofilefs_create_file(sb, root, "time_slice", &timeout_fops); | ||
187 | #endif | ||
142 | oprofile_create_stats_files(sb, root); | 188 | oprofile_create_stats_files(sb, root); |
143 | if (oprofile_ops.create_files) | 189 | if (oprofile_ops.create_files) |
144 | oprofile_ops.create_files(sb, root); | 190 | oprofile_ops.create_files(sb, root); |
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index 3c2270a8300c..61689e814d46 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c | |||
@@ -34,6 +34,7 @@ void oprofile_reset_stats(void) | |||
34 | atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); | 34 | atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); |
35 | atomic_set(&oprofile_stats.event_lost_overflow, 0); | 35 | atomic_set(&oprofile_stats.event_lost_overflow, 0); |
36 | atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); | 36 | atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); |
37 | atomic_set(&oprofile_stats.multiplex_counter, 0); | ||
37 | } | 38 | } |
38 | 39 | ||
39 | 40 | ||
@@ -76,4 +77,8 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root) | |||
76 | &oprofile_stats.event_lost_overflow); | 77 | &oprofile_stats.event_lost_overflow); |
77 | oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping", | 78 | oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping", |
78 | &oprofile_stats.bt_lost_no_mapping); | 79 | &oprofile_stats.bt_lost_no_mapping); |
80 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
81 | oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter", | ||
82 | &oprofile_stats.multiplex_counter); | ||
83 | #endif | ||
79 | } | 84 | } |
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h index 3da0d08dc1f9..0b54e46c3c14 100644 --- a/drivers/oprofile/oprofile_stats.h +++ b/drivers/oprofile/oprofile_stats.h | |||
@@ -17,6 +17,7 @@ struct oprofile_stat_struct { | |||
17 | atomic_t sample_lost_no_mapping; | 17 | atomic_t sample_lost_no_mapping; |
18 | atomic_t bt_lost_no_mapping; | 18 | atomic_t bt_lost_no_mapping; |
19 | atomic_t event_lost_overflow; | 19 | atomic_t event_lost_overflow; |
20 | atomic_t multiplex_counter; | ||
20 | }; | 21 | }; |
21 | 22 | ||
22 | extern struct oprofile_stat_struct oprofile_stats; | 23 | extern struct oprofile_stat_struct oprofile_stats; |
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index a4494d78e7c2..8aebe1e9d3d6 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
@@ -90,11 +90,10 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = { | |||
90 | 90 | ||
91 | static DEFINE_MUTEX(sn_hotplug_mutex); | 91 | static DEFINE_MUTEX(sn_hotplug_mutex); |
92 | 92 | ||
93 | static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, | 93 | static ssize_t path_show(struct pci_slot *pci_slot, char *buf) |
94 | char *buf) | ||
95 | { | 94 | { |
96 | int retval = -ENOENT; | 95 | int retval = -ENOENT; |
97 | struct slot *slot = bss_hotplug_slot->private; | 96 | struct slot *slot = pci_slot->hotplug->private; |
98 | 97 | ||
99 | if (!slot) | 98 | if (!slot) |
100 | return retval; | 99 | return retval; |
@@ -103,7 +102,7 @@ static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, | |||
103 | return retval; | 102 | return retval; |
104 | } | 103 | } |
105 | 104 | ||
106 | static struct hotplug_slot_attribute sn_slot_path_attr = __ATTR_RO(path); | 105 | static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path); |
107 | 106 | ||
108 | static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) | 107 | static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) |
109 | { | 108 | { |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 4f5b8712931f..44803644ca05 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -55,15 +55,12 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | |||
55 | return desc->irq_2_iommu; | 55 | return desc->irq_2_iommu; |
56 | } | 56 | } |
57 | 57 | ||
58 | static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) | 58 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) |
59 | { | 59 | { |
60 | struct irq_desc *desc; | 60 | struct irq_desc *desc; |
61 | struct irq_2_iommu *irq_iommu; | 61 | struct irq_2_iommu *irq_iommu; |
62 | 62 | ||
63 | /* | 63 | desc = irq_to_desc(irq); |
64 | * alloc irq desc if not allocated already. | ||
65 | */ | ||
66 | desc = irq_to_desc_alloc_node(irq, node); | ||
67 | if (!desc) { | 64 | if (!desc) { |
68 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | 65 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); |
69 | return NULL; | 66 | return NULL; |
@@ -72,16 +69,11 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) | |||
72 | irq_iommu = desc->irq_2_iommu; | 69 | irq_iommu = desc->irq_2_iommu; |
73 | 70 | ||
74 | if (!irq_iommu) | 71 | if (!irq_iommu) |
75 | desc->irq_2_iommu = get_one_free_irq_2_iommu(node); | 72 | desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); |
76 | 73 | ||
77 | return desc->irq_2_iommu; | 74 | return desc->irq_2_iommu; |
78 | } | 75 | } |
79 | 76 | ||
80 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
81 | { | ||
82 | return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id)); | ||
83 | } | ||
84 | |||
85 | #else /* !CONFIG_SPARSE_IRQ */ | 77 | #else /* !CONFIG_SPARSE_IRQ */ |
86 | 78 | ||
87 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | 79 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index e3a87210e947..e03fe98f0619 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno, | |||
598 | } | 598 | } |
599 | 599 | ||
600 | /** | 600 | /** |
601 | * pci_sriov_resource_alignment - get resource alignment for VF BAR | ||
602 | * @dev: the PCI device | ||
603 | * @resno: the resource number | ||
604 | * | ||
605 | * Returns the alignment of the VF BAR found in the SR-IOV capability. | ||
606 | * This is not the same as the resource size which is defined as | ||
607 | * the VF BAR size multiplied by the number of VFs. The alignment | ||
608 | * is just the VF BAR size. | ||
609 | */ | ||
610 | int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) | ||
611 | { | ||
612 | struct resource tmp; | ||
613 | enum pci_bar_type type; | ||
614 | int reg = pci_iov_resource_bar(dev, resno, &type); | ||
615 | |||
616 | if (!reg) | ||
617 | return 0; | ||
618 | |||
619 | __pci_read_base(dev, type, &tmp, reg); | ||
620 | return resource_alignment(&tmp); | ||
621 | } | ||
622 | |||
623 | /** | ||
601 | * pci_restore_iov_state - restore the state of the IOV capability | 624 | * pci_restore_iov_state - restore the state of the IOV capability |
602 | * @dev: the PCI device | 625 | * @dev: the PCI device |
603 | */ | 626 | */ |
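A worked example of the distinction the new pci_sriov_resource_alignment() kernel-doc draws, with assumed numbers: the VF BAR resource spans the per-VF BAR size multiplied by the number of VFs, but it only needs to be aligned to the per-VF BAR size that __pci_read_base() reads back.

	#include <stdio.h>

	int main(void)
	{
		unsigned long vf_bar_size = 8 * 1024;   /* assumed per-VF BAR size */
		unsigned long total_vfs   = 16;         /* assumed TotalVFs */

		printf("resource size: %lu KiB\n", vf_bar_size * total_vfs / 1024); /* 128 KiB */
		printf("alignment:     %lu KiB\n", vf_bar_size / 1024);             /*   8 KiB */
		return 0;
	}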
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d76c4c85367e..f99bc7f089f1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -508,7 +508,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) | |||
508 | return error; | 508 | return error; |
509 | } | 509 | } |
510 | 510 | ||
511 | return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0; | 511 | return pci_restore_state(pci_dev); |
512 | } | 512 | } |
513 | 513 | ||
514 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | 514 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index dbd0f947f497..7b70312181d7 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -846,6 +846,8 @@ pci_restore_state(struct pci_dev *dev) | |||
846 | int i; | 846 | int i; |
847 | u32 val; | 847 | u32 val; |
848 | 848 | ||
849 | if (!dev->state_saved) | ||
850 | return 0; | ||
849 | /* PCI Express register must be restored first */ | 851 | /* PCI Express register must be restored first */ |
850 | pci_restore_pcie_state(dev); | 852 | pci_restore_pcie_state(dev); |
851 | 853 | ||
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f73bcbedf37c..5ff4d25bf0e9 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev); | |||
243 | extern void pci_iov_release(struct pci_dev *dev); | 243 | extern void pci_iov_release(struct pci_dev *dev); |
244 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, | 244 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, |
245 | enum pci_bar_type *type); | 245 | enum pci_bar_type *type); |
246 | extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); | ||
246 | extern void pci_restore_iov_state(struct pci_dev *dev); | 247 | extern void pci_restore_iov_state(struct pci_dev *dev); |
247 | extern int pci_iov_bus_range(struct pci_bus *bus); | 248 | extern int pci_iov_bus_range(struct pci_bus *bus); |
248 | 249 | ||
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
298 | } | 299 | } |
299 | #endif /* CONFIG_PCI_IOV */ | 300 | #endif /* CONFIG_PCI_IOV */ |
300 | 301 | ||
302 | static inline int pci_resource_alignment(struct pci_dev *dev, | ||
303 | struct resource *res) | ||
304 | { | ||
305 | #ifdef CONFIG_PCI_IOV | ||
306 | int resno = res - dev->resource; | ||
307 | |||
308 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) | ||
309 | return pci_sriov_resource_alignment(dev, resno); | ||
310 | #endif | ||
311 | return resource_alignment(res); | ||
312 | } | ||
313 | |||
301 | #endif /* DRIVERS_PCI_H */ | 314 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 06b965623962..85ce23997be4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -992,7 +992,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, | |||
992 | 992 | ||
993 | static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) | 993 | static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) |
994 | { | 994 | { |
995 | /* set sb600/sb700/sb800 sata to ahci mode */ | 995 | /* set SBX00 SATA in IDE mode to AHCI mode */ |
996 | u8 tmp; | 996 | u8 tmp; |
997 | 997 | ||
998 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); | 998 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); |
@@ -1011,6 +1011,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk | |||
1011 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); | 1011 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); |
1012 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); | 1012 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); |
1013 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); | 1013 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); |
1014 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); | ||
1015 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); | ||
1014 | 1016 | ||
1015 | /* | 1017 | /* |
1016 | * Serverworks CSB5 IDE does not fully support native mode | 1018 | * Serverworks CSB5 IDE does not fully support native mode |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index b636e245445d..7c443b4583ab 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
26 | #include <linux/cache.h> | 26 | #include <linux/cache.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | 28 | #include "pci.h" | |
29 | 29 | ||
30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) | 30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) |
31 | { | 31 | { |
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long | |||
384 | continue; | 384 | continue; |
385 | r_size = resource_size(r); | 385 | r_size = resource_size(r); |
386 | /* For bridges size != alignment */ | 386 | /* For bridges size != alignment */ |
387 | align = resource_alignment(r); | 387 | align = pci_resource_alignment(dev, r); |
388 | order = __ffs(align) - 20; | 388 | order = __ffs(align) - 20; |
389 | if (order > 11) { | 389 | if (order > 11) { |
390 | dev_warn(&dev->dev, "BAR %d bad alignment %llx: " | 390 | dev_warn(&dev->dev, "BAR %d bad alignment %llx: " |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 1898c7b47907..88cdd1a937d6 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
144 | 144 | ||
145 | size = resource_size(res); | 145 | size = resource_size(res); |
146 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; | 146 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; |
147 | align = resource_alignment(res); | 147 | align = pci_resource_alignment(dev, res); |
148 | 148 | ||
149 | /* First, try exact prefetching match.. */ | 149 | /* First, try exact prefetching match.. */ |
150 | ret = pci_bus_alloc_resource(bus, res, size, align, min, | 150 | ret = pci_bus_alloc_resource(bus, res, size, align, min, |
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
178 | struct pci_bus *bus; | 178 | struct pci_bus *bus; |
179 | int ret; | 179 | int ret; |
180 | 180 | ||
181 | align = resource_alignment(res); | 181 | align = pci_resource_alignment(dev, res); |
182 | if (!align) { | 182 | if (!align) { |
183 | dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " | 183 | dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " |
184 | "alignment) %pR flags %#lx\n", | 184 | "alignment) %pR flags %#lx\n", |
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | |||
259 | if (!(r->flags) || r->parent) | 259 | if (!(r->flags) || r->parent) |
260 | continue; | 260 | continue; |
261 | 261 | ||
262 | r_align = resource_alignment(r); | 262 | r_align = pci_resource_alignment(dev, r); |
263 | if (!r_align) { | 263 | if (!r_align) { |
264 | dev_warn(&dev->dev, "BAR %d: bogus alignment " | 264 | dev_warn(&dev->dev, "BAR %d: bogus alignment " |
265 | "%pR flags %#lx\n", | 265 | "%pR flags %#lx\n", |
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | |||
271 | struct resource_list *ln = list->next; | 271 | struct resource_list *ln = list->next; |
272 | 272 | ||
273 | if (ln) | 273 | if (ln) |
274 | align = resource_alignment(ln->res); | 274 | align = pci_resource_alignment(ln->dev, ln->res); |
275 | 275 | ||
276 | if (r_align > align) { | 276 | if (r_align > align) { |
277 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | 277 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); |
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 81d31ea507d1..51c0a8bee414 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c | |||
@@ -335,6 +335,7 @@ static void bt_rfkill_poll(struct rfkill *rfkill, void *data) | |||
335 | if (hci_result != HCI_SUCCESS) { | 335 | if (hci_result != HCI_SUCCESS) { |
336 | /* Can't do anything useful */ | 336 | /* Can't do anything useful */ |
337 | mutex_unlock(&dev->mutex); | 337 | mutex_unlock(&dev->mutex); |
338 | return; | ||
338 | } | 339 | } |
339 | 340 | ||
340 | new_rfk_state = value; | 341 | new_rfk_state = value; |
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 043b208d971d..f215a5919192 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) | |||
270 | acpi_status status; | 270 | acpi_status status; |
271 | struct acpi_object_list input; | 271 | struct acpi_object_list input; |
272 | union acpi_object params[3]; | 272 | union acpi_object params[3]; |
273 | char method[4] = "WM"; | 273 | char method[5] = "WM"; |
274 | 274 | ||
275 | if (!find_guid(guid_string, &wblock)) | 275 | if (!find_guid(guid_string, &wblock)) |
276 | return AE_ERROR; | 276 | return AE_ERROR; |
@@ -328,8 +328,8 @@ struct acpi_buffer *out) | |||
328 | acpi_status status, wc_status = AE_ERROR; | 328 | acpi_status status, wc_status = AE_ERROR; |
329 | struct acpi_object_list input, wc_input; | 329 | struct acpi_object_list input, wc_input; |
330 | union acpi_object wc_params[1], wq_params[1]; | 330 | union acpi_object wc_params[1], wq_params[1]; |
331 | char method[4]; | 331 | char method[5]; |
332 | char wc_method[4] = "WC"; | 332 | char wc_method[5] = "WC"; |
333 | 333 | ||
334 | if (!guid_string || !out) | 334 | if (!guid_string || !out) |
335 | return AE_BAD_PARAMETER; | 335 | return AE_BAD_PARAMETER; |
@@ -410,7 +410,7 @@ const struct acpi_buffer *in) | |||
410 | acpi_handle handle; | 410 | acpi_handle handle; |
411 | struct acpi_object_list input; | 411 | struct acpi_object_list input; |
412 | union acpi_object params[2]; | 412 | union acpi_object params[2]; |
413 | char method[4] = "WS"; | 413 | char method[5] = "WS"; |
414 | 414 | ||
415 | if (!guid_string || !in) | 415 | if (!guid_string || !in) |
416 | return AE_BAD_DATA; | 416 | return AE_BAD_DATA; |
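The wmi.c buffer bump from 4 to 5 bytes leaves room for the terminating NUL: the ACPI method names are a two-letter prefix ("WM", "WC", "WS") plus a two-character object ID, so the full name is four characters and needs a fifth byte. A minimal sketch with an assumed object ID:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char method[5] = "WM";
		const char *object_id = "AA";   /* assumed two-character WMI object ID */

		strncat(method, object_id, 2);  /* "WMAA" plus NUL: 5 bytes in total */
		printf("%s uses %zu bytes\n", method, strlen(method) + 1);
		return 0;
	}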
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 7e6b5a3b3281..fc83783c3a96 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c | |||
@@ -55,12 +55,13 @@ __asm__(".text \n" | |||
55 | 55 | ||
56 | #define Q2_SET_SEL(cpu, selname, address, size) \ | 56 | #define Q2_SET_SEL(cpu, selname, address, size) \ |
57 | do { \ | 57 | do { \ |
58 | struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ | 58 | struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ |
59 | set_base(gdt[(selname) >> 3], (u32)(address)); \ | 59 | set_desc_base(&gdt[(selname) >> 3], (u32)(address)); \ |
60 | set_limit(gdt[(selname) >> 3], size); \ | 60 | set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ |
61 | } while(0) | 61 | } while(0) |
62 | 62 | ||
63 | static struct desc_struct bad_bios_desc; | 63 | static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, |
64 | (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); | ||
64 | 65 | ||
65 | /* | 66 | /* |
66 | * At some point we want to use this stack frame pointer to unwind | 67 | * At some point we want to use this stack frame pointer to unwind |
@@ -476,19 +477,15 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) | |||
476 | pnp_bios_callpoint.offset = header->fields.pm16offset; | 477 | pnp_bios_callpoint.offset = header->fields.pm16offset; |
477 | pnp_bios_callpoint.segment = PNP_CS16; | 478 | pnp_bios_callpoint.segment = PNP_CS16; |
478 | 479 | ||
479 | bad_bios_desc.a = 0; | ||
480 | bad_bios_desc.b = 0x00409200; | ||
481 | |||
482 | set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); | ||
483 | _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); | ||
484 | for_each_possible_cpu(i) { | 480 | for_each_possible_cpu(i) { |
485 | struct desc_struct *gdt = get_cpu_gdt_table(i); | 481 | struct desc_struct *gdt = get_cpu_gdt_table(i); |
486 | if (!gdt) | 482 | if (!gdt) |
487 | continue; | 483 | continue; |
488 | set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc); | 484 | set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32], |
489 | set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], | 485 | (unsigned long)&pnp_bios_callfunc); |
490 | __va(header->fields.pm16cseg)); | 486 | set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16], |
491 | set_base(gdt[GDT_ENTRY_PNPBIOS_DS], | 487 | (unsigned long)__va(header->fields.pm16cseg)); |
492 | __va(header->fields.pm16dseg)); | 488 | set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], |
489 | (unsigned long)__va(header->fields.pm16dseg)); | ||
493 | } | 490 | } |
494 | } | 491 | } |
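On the bioscalls.c hunks: x86 segment-descriptor limits are inclusive, naming the last valid byte offset, so a segment of size bytes gets a limit of size - 1. The old set_limit() wrapper apparently hid that subtraction, while the new set_desc_limit() takes the limit value directly, which is presumably why Q2_SET_SEL now passes (size) - 1 and the static bad_bios_desc initializer uses PAGE_SIZE - 0x400 - 1. A trivial standalone check of the relation (illustrative only):

	#include <assert.h>

	static unsigned int limit_for_size(unsigned int size)
	{
		return size - 1;	/* descriptor limit = last valid offset */
	}

	int main(void)
	{
		assert(limit_for_size(4096) == 0xfff);	/* a 4 KiB segment */
		return 0;
	}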
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index ac8cc8cea1e3..fea17e7805e9 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c | |||
@@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps) | |||
244 | } | 244 | } |
245 | pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL, | 245 | pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL, |
246 | "pps%d", pps->id); | 246 | "pps%d", pps->id); |
247 | if (err) | 247 | if (IS_ERR(pps->dev)) |
248 | goto del_cdev; | 248 | goto del_cdev; |
249 | dev_set_drvdata(pps->dev, pps); | 249 | dev_set_drvdata(pps->dev, pps); |
250 | 250 | ||
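On the pps.c fix: device_create() reports failure through an ERR_PTR-encoded return value, so the result itself has to be tested with IS_ERR(); the old code tested a stale err left over from an earlier call. The patch only switches the condition; one common fuller shape of the idiom (a sketch, not the driver's code) also recovers the errno:

	pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
				 "pps%d", pps->id);
	if (IS_ERR(pps->dev)) {
		err = PTR_ERR(pps->dev);	/* turn the error pointer back into -Exxx */
		goto del_cdev;
	}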
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 749836668655..e109da4583a8 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -669,14 +669,14 @@ static void dasd_profile_end(struct dasd_block *block, | |||
669 | * memory and 2) dasd_smalloc_request uses the static ccw memory | 669 | * memory and 2) dasd_smalloc_request uses the static ccw memory |
670 | * that gets allocated for each device. | 670 | * that gets allocated for each device. |
671 | */ | 671 | */ |
672 | struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, | 672 | struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, |
673 | int datasize, | 673 | int datasize, |
674 | struct dasd_device *device) | 674 | struct dasd_device *device) |
675 | { | 675 | { |
676 | struct dasd_ccw_req *cqr; | 676 | struct dasd_ccw_req *cqr; |
677 | 677 | ||
678 | /* Sanity checks */ | 678 | /* Sanity checks */ |
679 | BUG_ON( magic == NULL || datasize > PAGE_SIZE || | 679 | BUG_ON(datasize > PAGE_SIZE || |
680 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); | 680 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); |
681 | 681 | ||
682 | cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); | 682 | cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); |
@@ -700,14 +700,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, | |||
700 | return ERR_PTR(-ENOMEM); | 700 | return ERR_PTR(-ENOMEM); |
701 | } | 701 | } |
702 | } | 702 | } |
703 | strncpy((char *) &cqr->magic, magic, 4); | 703 | cqr->magic = magic; |
704 | ASCEBC((char *) &cqr->magic, 4); | ||
705 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 704 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
706 | dasd_get_device(device); | 705 | dasd_get_device(device); |
707 | return cqr; | 706 | return cqr; |
708 | } | 707 | } |
709 | 708 | ||
710 | struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, | 709 | struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, |
711 | int datasize, | 710 | int datasize, |
712 | struct dasd_device *device) | 711 | struct dasd_device *device) |
713 | { | 712 | { |
@@ -717,7 +716,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, | |||
717 | int size; | 716 | int size; |
718 | 717 | ||
719 | /* Sanity checks */ | 718 | /* Sanity checks */ |
720 | BUG_ON( magic == NULL || datasize > PAGE_SIZE || | 719 | BUG_ON(datasize > PAGE_SIZE || |
721 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); | 720 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); |
722 | 721 | ||
723 | size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; | 722 | size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; |
@@ -744,8 +743,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, | |||
744 | cqr->data = data; | 743 | cqr->data = data; |
745 | memset(cqr->data, 0, datasize); | 744 | memset(cqr->data, 0, datasize); |
746 | } | 745 | } |
747 | strncpy((char *) &cqr->magic, magic, 4); | 746 | cqr->magic = magic; |
748 | ASCEBC((char *) &cqr->magic, 4); | ||
749 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 747 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
750 | dasd_get_device(device); | 748 | dasd_get_device(device); |
751 | return cqr; | 749 | return cqr; |
@@ -899,9 +897,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) | |||
899 | switch (rc) { | 897 | switch (rc) { |
900 | case 0: | 898 | case 0: |
901 | cqr->status = DASD_CQR_IN_IO; | 899 | cqr->status = DASD_CQR_IN_IO; |
902 | DBF_DEV_EVENT(DBF_DEBUG, device, | ||
903 | "start_IO: request %p started successful", | ||
904 | cqr); | ||
905 | break; | 900 | break; |
906 | case -EBUSY: | 901 | case -EBUSY: |
907 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 902 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", |
@@ -1699,8 +1694,11 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
1699 | * for that. State DASD_STATE_ONLINE is normal block device | 1694 | * for that. State DASD_STATE_ONLINE is normal block device |
1700 | * operation. | 1695 | * operation. |
1701 | */ | 1696 | */ |
1702 | if (basedev->state < DASD_STATE_READY) | 1697 | if (basedev->state < DASD_STATE_READY) { |
1698 | while ((req = blk_fetch_request(block->request_queue))) | ||
1699 | __blk_end_request_all(req, -EIO); | ||
1703 | return; | 1700 | return; |
1701 | } | ||
1704 | /* Now we try to fetch requests from the request queue */ | 1702 | /* Now we try to fetch requests from the request queue */ |
1705 | while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { | 1703 | while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { |
1706 | if (basedev->features & DASD_FEATURE_READONLY && | 1704 | if (basedev->features & DASD_FEATURE_READONLY && |
@@ -2135,9 +2133,9 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
2135 | struct dasd_device *base; | 2133 | struct dasd_device *base; |
2136 | 2134 | ||
2137 | block = bdev->bd_disk->private_data; | 2135 | block = bdev->bd_disk->private_data; |
2138 | base = block->base; | ||
2139 | if (!block) | 2136 | if (!block) |
2140 | return -ENODEV; | 2137 | return -ENODEV; |
2138 | base = block->base; | ||
2141 | 2139 | ||
2142 | if (!base->discipline || | 2140 | if (!base->discipline || |
2143 | !base->discipline->fill_geometry) | 2141 | !base->discipline->fill_geometry) |
@@ -2530,7 +2528,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_restore_device); | |||
2530 | static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, | 2528 | static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, |
2531 | void *rdc_buffer, | 2529 | void *rdc_buffer, |
2532 | int rdc_buffer_size, | 2530 | int rdc_buffer_size, |
2533 | char *magic) | 2531 | int magic) |
2534 | { | 2532 | { |
2535 | struct dasd_ccw_req *cqr; | 2533 | struct dasd_ccw_req *cqr; |
2536 | struct ccw1 *ccw; | 2534 | struct ccw1 *ccw; |
@@ -2561,7 +2559,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, | |||
2561 | } | 2559 | } |
2562 | 2560 | ||
2563 | 2561 | ||
2564 | int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, | 2562 | int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, |
2565 | void *rdc_buffer, int rdc_buffer_size) | 2563 | void *rdc_buffer, int rdc_buffer_size) |
2566 | { | 2564 | { |
2567 | int ret; | 2565 | int ret; |
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 27991b692056..e8ff7b0c961d 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "dasd" | 10 | #define KMSG_COMPONENT "dasd-eckd" |
11 | 11 | ||
12 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 5b7bbc87593b..70a008c00522 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | 5 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "dasd" | 8 | #define KMSG_COMPONENT "dasd-eckd" |
9 | 9 | ||
10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
11 | #include <asm/ebcdic.h> | 11 | #include <asm/ebcdic.h> |
@@ -379,8 +379,7 @@ static int read_unit_address_configuration(struct dasd_device *device, | |||
379 | int rc; | 379 | int rc; |
380 | unsigned long flags; | 380 | unsigned long flags; |
381 | 381 | ||
382 | cqr = dasd_kmalloc_request("ECKD", | 382 | cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
383 | 1 /* PSF */ + 1 /* RSSD */ , | ||
384 | (sizeof(struct dasd_psf_prssd_data)), | 383 | (sizeof(struct dasd_psf_prssd_data)), |
385 | device); | 384 | device); |
386 | if (IS_ERR(cqr)) | 385 | if (IS_ERR(cqr)) |
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 644086ba2ede..4e49b4a6c880 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "dasd" | 11 | #define KMSG_COMPONENT "dasd-diag" |
12 | 12 | ||
13 | #include <linux/stddef.h> | 13 | #include <linux/stddef.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
@@ -523,8 +523,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
523 | /* Build the request */ | 523 | /* Build the request */ |
524 | datasize = sizeof(struct dasd_diag_req) + | 524 | datasize = sizeof(struct dasd_diag_req) + |
525 | count*sizeof(struct dasd_diag_bio); | 525 | count*sizeof(struct dasd_diag_bio); |
526 | cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, | 526 | cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); |
527 | datasize, memdev); | ||
528 | if (IS_ERR(cqr)) | 527 | if (IS_ERR(cqr)) |
529 | return cqr; | 528 | return cqr; |
530 | 529 | ||
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index c11770f5b368..a1ce573648a2 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * Author.........: Nigel Hislop <hislop_nigel@emc.com> | 10 | * Author.........: Nigel Hislop <hislop_nigel@emc.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define KMSG_COMPONENT "dasd" | 13 | #define KMSG_COMPONENT "dasd-eckd" |
14 | 14 | ||
15 | #include <linux/stddef.h> | 15 | #include <linux/stddef.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -730,7 +730,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, | |||
730 | struct dasd_ccw_req *cqr; | 730 | struct dasd_ccw_req *cqr; |
731 | struct ccw1 *ccw; | 731 | struct ccw1 *ccw; |
732 | 732 | ||
733 | cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device); | 733 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count, |
734 | device); | ||
734 | 735 | ||
735 | if (IS_ERR(cqr)) { | 736 | if (IS_ERR(cqr)) { |
736 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 737 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
@@ -934,8 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) | |||
934 | struct dasd_eckd_private *private; | 935 | struct dasd_eckd_private *private; |
935 | 936 | ||
936 | private = (struct dasd_eckd_private *) device->private; | 937 | private = (struct dasd_eckd_private *) device->private; |
937 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 938 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
938 | 1 /* PSF */ + 1 /* RSSD */ , | ||
939 | (sizeof(struct dasd_psf_prssd_data) + | 939 | (sizeof(struct dasd_psf_prssd_data) + |
940 | sizeof(struct dasd_rssd_features)), | 940 | sizeof(struct dasd_rssd_features)), |
941 | device); | 941 | device); |
@@ -998,7 +998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, | |||
998 | struct dasd_psf_ssc_data *psf_ssc_data; | 998 | struct dasd_psf_ssc_data *psf_ssc_data; |
999 | struct ccw1 *ccw; | 999 | struct ccw1 *ccw; |
1000 | 1000 | ||
1001 | cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , | 1001 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , |
1002 | sizeof(struct dasd_psf_ssc_data), | 1002 | sizeof(struct dasd_psf_ssc_data), |
1003 | device); | 1003 | device); |
1004 | 1004 | ||
@@ -1149,8 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
1149 | goto out_err3; | 1149 | goto out_err3; |
1150 | 1150 | ||
1151 | /* Read Device Characteristics */ | 1151 | /* Read Device Characteristics */ |
1152 | rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data, | 1152 | rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, |
1153 | 64); | 1153 | &private->rdc_data, 64); |
1154 | if (rc) { | 1154 | if (rc) { |
1155 | DBF_EVENT(DBF_WARNING, | 1155 | DBF_EVENT(DBF_WARNING, |
1156 | "Read device characteristics failed, rc=%d for " | 1156 | "Read device characteristics failed, rc=%d for " |
@@ -1217,8 +1217,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) | |||
1217 | 1217 | ||
1218 | cplength = 8; | 1218 | cplength = 8; |
1219 | datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); | 1219 | datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); |
1220 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1220 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); |
1221 | cplength, datasize, device); | ||
1222 | if (IS_ERR(cqr)) | 1221 | if (IS_ERR(cqr)) |
1223 | return cqr; | 1222 | return cqr; |
1224 | ccw = cqr->cpaddr; | 1223 | ccw = cqr->cpaddr; |
@@ -1499,8 +1498,7 @@ dasd_eckd_format_device(struct dasd_device * device, | |||
1499 | return ERR_PTR(-EINVAL); | 1498 | return ERR_PTR(-EINVAL); |
1500 | } | 1499 | } |
1501 | /* Allocate the format ccw request. */ | 1500 | /* Allocate the format ccw request. */ |
1502 | fcp = dasd_smalloc_request(dasd_eckd_discipline.name, | 1501 | fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); |
1503 | cplength, datasize, device); | ||
1504 | if (IS_ERR(fcp)) | 1502 | if (IS_ERR(fcp)) |
1505 | return fcp; | 1503 | return fcp; |
1506 | 1504 | ||
@@ -1783,8 +1781,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
1783 | datasize += count*sizeof(struct LO_eckd_data); | 1781 | datasize += count*sizeof(struct LO_eckd_data); |
1784 | } | 1782 | } |
1785 | /* Allocate the ccw request. */ | 1783 | /* Allocate the ccw request. */ |
1786 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1784 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, |
1787 | cplength, datasize, startdev); | 1785 | startdev); |
1788 | if (IS_ERR(cqr)) | 1786 | if (IS_ERR(cqr)) |
1789 | return cqr; | 1787 | return cqr; |
1790 | ccw = cqr->cpaddr; | 1788 | ccw = cqr->cpaddr; |
@@ -1948,8 +1946,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
1948 | cidaw * sizeof(unsigned long long); | 1946 | cidaw * sizeof(unsigned long long); |
1949 | 1947 | ||
1950 | /* Allocate the ccw request. */ | 1948 | /* Allocate the ccw request. */ |
1951 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1949 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, |
1952 | cplength, datasize, startdev); | 1950 | startdev); |
1953 | if (IS_ERR(cqr)) | 1951 | if (IS_ERR(cqr)) |
1954 | return cqr; | 1952 | return cqr; |
1955 | ccw = cqr->cpaddr; | 1953 | ccw = cqr->cpaddr; |
@@ -2249,8 +2247,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
2249 | 2247 | ||
2250 | /* Allocate the ccw request. */ | 2248 | /* Allocate the ccw request. */ |
2251 | itcw_size = itcw_calc_size(0, ctidaw, 0); | 2249 | itcw_size = itcw_calc_size(0, ctidaw, 0); |
2252 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 2250 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); |
2253 | 0, itcw_size, startdev); | ||
2254 | if (IS_ERR(cqr)) | 2251 | if (IS_ERR(cqr)) |
2255 | return cqr; | 2252 | return cqr; |
2256 | 2253 | ||
@@ -2557,8 +2554,7 @@ dasd_eckd_release(struct dasd_device *device) | |||
2557 | if (!capable(CAP_SYS_ADMIN)) | 2554 | if (!capable(CAP_SYS_ADMIN)) |
2558 | return -EACCES; | 2555 | return -EACCES; |
2559 | 2556 | ||
2560 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 2557 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); |
2561 | 1, 32, device); | ||
2562 | if (IS_ERR(cqr)) { | 2558 | if (IS_ERR(cqr)) { |
2563 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2559 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
2564 | "Could not allocate initialization request"); | 2560 | "Could not allocate initialization request"); |
@@ -2600,8 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
2600 | if (!capable(CAP_SYS_ADMIN)) | 2596 | if (!capable(CAP_SYS_ADMIN)) |
2601 | return -EACCES; | 2597 | return -EACCES; |
2602 | 2598 | ||
2603 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 2599 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); |
2604 | 1, 32, device); | ||
2605 | if (IS_ERR(cqr)) { | 2600 | if (IS_ERR(cqr)) { |
2606 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2601 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
2607 | "Could not allocate initialization request"); | 2602 | "Could not allocate initialization request"); |
@@ -2642,8 +2637,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
2642 | if (!capable(CAP_SYS_ADMIN)) | 2637 | if (!capable(CAP_SYS_ADMIN)) |
2643 | return -EACCES; | 2638 | return -EACCES; |
2644 | 2639 | ||
2645 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 2640 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); |
2646 | 1, 32, device); | ||
2647 | if (IS_ERR(cqr)) { | 2641 | if (IS_ERR(cqr)) { |
2648 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2642 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
2649 | "Could not allocate initialization request"); | 2643 | "Could not allocate initialization request"); |
@@ -2681,8 +2675,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) | |||
2681 | struct ccw1 *ccw; | 2675 | struct ccw1 *ccw; |
2682 | int rc; | 2676 | int rc; |
2683 | 2677 | ||
2684 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 2678 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
2685 | 1 /* PSF */ + 1 /* RSSD */ , | ||
2686 | (sizeof(struct dasd_psf_prssd_data) + | 2679 | (sizeof(struct dasd_psf_prssd_data) + |
2687 | sizeof(struct dasd_rssd_perf_stats_t)), | 2680 | sizeof(struct dasd_rssd_perf_stats_t)), |
2688 | device); | 2681 | device); |
@@ -2828,7 +2821,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) | |||
2828 | } | 2821 | } |
2829 | 2822 | ||
2830 | /* setup CCWs for PSF + RSSD */ | 2823 | /* setup CCWs for PSF + RSSD */ |
2831 | cqr = dasd_smalloc_request("ECKD", 2 , 0, device); | 2824 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); |
2832 | if (IS_ERR(cqr)) { | 2825 | if (IS_ERR(cqr)) { |
2833 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2826 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
2834 | "Could not allocate initialization request"); | 2827 | "Could not allocate initialization request"); |
@@ -3254,7 +3247,7 @@ int dasd_eckd_restore_device(struct dasd_device *device) | |||
3254 | 3247 | ||
3255 | /* Read Device Characteristics */ | 3248 | /* Read Device Characteristics */ |
3256 | memset(&private->rdc_data, 0, sizeof(private->rdc_data)); | 3249 | memset(&private->rdc_data, 0, sizeof(private->rdc_data)); |
3257 | rc = dasd_generic_read_dev_chars(device, "ECKD", | 3250 | rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, |
3258 | &private->rdc_data, 64); | 3251 | &private->rdc_data, 64); |
3259 | if (rc) { | 3252 | if (rc) { |
3260 | DBF_EVENT(DBF_WARNING, | 3253 | DBF_EVENT(DBF_WARNING, |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index c24c8c30380d..d96039eae59b 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | 6 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "dasd" | 9 | #define KMSG_COMPONENT "dasd-eckd" |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
@@ -464,7 +464,7 @@ int dasd_eer_enable(struct dasd_device *device) | |||
464 | if (!device->discipline || strcmp(device->discipline->name, "ECKD")) | 464 | if (!device->discipline || strcmp(device->discipline->name, "ECKD")) |
465 | return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ | 465 | return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ |
466 | 466 | ||
467 | cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, | 467 | cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, |
468 | SNSS_DATA_SIZE, device); | 468 | SNSS_DATA_SIZE, device); |
469 | if (IS_ERR(cqr)) | 469 | if (IS_ERR(cqr)) |
470 | return -ENOMEM; | 470 | return -ENOMEM; |
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index cb8f9cef7429..7656384a811d 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c | |||
@@ -99,8 +99,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) | |||
99 | cqr->lpm = LPM_ANYPATH; | 99 | cqr->lpm = LPM_ANYPATH; |
100 | cqr->status = DASD_CQR_FILLED; | 100 | cqr->status = DASD_CQR_FILLED; |
101 | } else { | 101 | } else { |
102 | dev_err(&device->cdev->dev, | 102 | pr_err("%s: default ERP has run out of retries and failed\n", |
103 | "default ERP has run out of retries and failed\n"); | 103 | dev_name(&device->cdev->dev)); |
104 | cqr->status = DASD_CQR_FAILED; | 104 | cqr->status = DASD_CQR_FAILED; |
105 | cqr->stopclk = get_clock(); | 105 | cqr->stopclk = get_clock(); |
106 | } | 106 | } |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 31849ad5e59f..f245377e8e27 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright IBM Corp. 1999, 2009 | 5 | * Copyright IBM Corp. 1999, 2009 |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "dasd" | 8 | #define KMSG_COMPONENT "dasd-fba" |
9 | 9 | ||
10 | #include <linux/stddef.h> | 10 | #include <linux/stddef.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
@@ -152,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device) | |||
152 | block->base = device; | 152 | block->base = device; |
153 | 153 | ||
154 | /* Read Device Characteristics */ | 154 | /* Read Device Characteristics */ |
155 | rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data, | 155 | rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, |
156 | 32); | 156 | &private->rdc_data, 32); |
157 | if (rc) { | 157 | if (rc) { |
158 | DBF_EVENT(DBF_WARNING, "Read device characteristics returned " | 158 | DBF_EVENT(DBF_WARNING, "Read device characteristics returned " |
159 | "error %d for device: %s", | 159 | "error %d for device: %s", |
@@ -305,8 +305,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | |||
305 | datasize += (count - 1)*sizeof(struct LO_fba_data); | 305 | datasize += (count - 1)*sizeof(struct LO_fba_data); |
306 | } | 306 | } |
307 | /* Allocate the ccw request. */ | 307 | /* Allocate the ccw request. */ |
308 | cqr = dasd_smalloc_request(dasd_fba_discipline.name, | 308 | cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); |
309 | cplength, datasize, memdev); | ||
310 | if (IS_ERR(cqr)) | 309 | if (IS_ERR(cqr)) |
311 | return cqr; | 310 | return cqr; |
312 | ccw = cqr->cpaddr; | 311 | ccw = cqr->cpaddr; |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index b699ca356ac5..5e47a1ee52b9 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -59,6 +59,11 @@ | |||
59 | #include <asm/dasd.h> | 59 | #include <asm/dasd.h> |
60 | #include <asm/idals.h> | 60 | #include <asm/idals.h> |
61 | 61 | ||
62 | /* DASD discipline magic */ | ||
63 | #define DASD_ECKD_MAGIC 0xC5C3D2C4 | ||
64 | #define DASD_DIAG_MAGIC 0xC4C9C1C7 | ||
65 | #define DASD_FBA_MAGIC 0xC6C2C140 | ||
66 | |||
62 | /* | 67 | /* |
63 | * SECTION: Type definitions | 68 | * SECTION: Type definitions |
64 | */ | 69 | */ |
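A note on the new DASD_*_MAGIC constants and the matching dasd_kmalloc_request()/dasd_smalloc_request() signature change: the allocators previously took an ASCII name and ran strncpy() plus ASCEBC() on every allocation, whereas the new integer magics are the same four EBCDIC characters pre-packed into a 32-bit value at compile time, so cqr->magic can be assigned directly. A sketch of the relationship (illustrative, not code from the patch):

	/*
	 * EBCDIC code points: 'E'=0xC5 'C'=0xC3 'K'=0xD2 'D'=0xC4,
	 * 'I'=0xC9 'A'=0xC1 'G'=0xC7, 'F'=0xC6 'B'=0xC2, ' '=0x40.
	 */
	#define EBCDIC_MAGIC(a, b, c, d) \
		(((unsigned int)(a) << 24) | ((unsigned int)(b) << 16) | \
		 ((unsigned int)(c) << 8)  |  (unsigned int)(d))

	/* EBCDIC_MAGIC(0xC5, 0xC3, 0xD2, 0xC4) == 0xC5C3D2C4   "ECKD"
	 * EBCDIC_MAGIC(0xC4, 0xC9, 0xC1, 0xC7) == 0xC4C9C1C7   "DIAG"
	 * EBCDIC_MAGIC(0xC6, 0xC2, 0xC1, 0x40) == 0xC6C2C140   "FBA " */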
@@ -540,9 +545,9 @@ extern struct block_device_operations dasd_device_operations; | |||
540 | extern struct kmem_cache *dasd_page_cache; | 545 | extern struct kmem_cache *dasd_page_cache; |
541 | 546 | ||
542 | struct dasd_ccw_req * | 547 | struct dasd_ccw_req * |
543 | dasd_kmalloc_request(char *, int, int, struct dasd_device *); | 548 | dasd_kmalloc_request(int , int, int, struct dasd_device *); |
544 | struct dasd_ccw_req * | 549 | struct dasd_ccw_req * |
545 | dasd_smalloc_request(char *, int, int, struct dasd_device *); | 550 | dasd_smalloc_request(int , int, int, struct dasd_device *); |
546 | void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); | 551 | void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); |
547 | void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); | 552 | void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); |
548 | 553 | ||
@@ -587,7 +592,7 @@ void dasd_generic_handle_state_change(struct dasd_device *); | |||
587 | int dasd_generic_pm_freeze(struct ccw_device *); | 592 | int dasd_generic_pm_freeze(struct ccw_device *); |
588 | int dasd_generic_restore_device(struct ccw_device *); | 593 | int dasd_generic_restore_device(struct ccw_device *); |
589 | 594 | ||
590 | int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); | 595 | int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); |
591 | char *dasd_get_sense(struct irb *); | 596 | char *dasd_get_sense(struct irb *); |
592 | 597 | ||
593 | /* externals in dasd_devmap.c */ | 598 | /* externals in dasd_devmap.c */ |
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index df918ef27965..f756a1b0c57a 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -98,8 +98,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block) | |||
98 | if (!capable (CAP_SYS_ADMIN)) | 98 | if (!capable (CAP_SYS_ADMIN)) |
99 | return -EACCES; | 99 | return -EACCES; |
100 | 100 | ||
101 | dev_info(&base->cdev->dev, "The DASD has been put in the quiesce " | 101 | pr_info("%s: The DASD has been put in the quiesce " |
102 | "state\n"); | 102 | "state\n", dev_name(&base->cdev->dev)); |
103 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); | 103 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); |
104 | base->stopped |= DASD_STOPPED_QUIESCE; | 104 | base->stopped |= DASD_STOPPED_QUIESCE; |
105 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); | 105 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); |
@@ -119,8 +119,8 @@ static int dasd_ioctl_resume(struct dasd_block *block) | |||
119 | if (!capable (CAP_SYS_ADMIN)) | 119 | if (!capable (CAP_SYS_ADMIN)) |
120 | return -EACCES; | 120 | return -EACCES; |
121 | 121 | ||
122 | dev_info(&base->cdev->dev, "I/O operations have been resumed " | 122 | pr_info("%s: I/O operations have been resumed " |
123 | "on the DASD\n"); | 123 | "on the DASD\n", dev_name(&base->cdev->dev)); |
124 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); | 124 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); |
125 | base->stopped &= ~DASD_STOPPED_QUIESCE; | 125 | base->stopped &= ~DASD_STOPPED_QUIESCE; |
126 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); | 126 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); |
@@ -146,8 +146,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) | |||
146 | return -EPERM; | 146 | return -EPERM; |
147 | 147 | ||
148 | if (base->state != DASD_STATE_BASIC) { | 148 | if (base->state != DASD_STATE_BASIC) { |
149 | dev_warn(&base->cdev->dev, | 149 | pr_warning("%s: The DASD cannot be formatted while it is " |
150 | "The DASD cannot be formatted while it is enabled\n"); | 150 | "enabled\n", dev_name(&base->cdev->dev)); |
151 | return -EBUSY; | 151 | return -EBUSY; |
152 | } | 152 | } |
153 | 153 | ||
@@ -175,9 +175,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) | |||
175 | dasd_sfree_request(cqr, cqr->memdev); | 175 | dasd_sfree_request(cqr, cqr->memdev); |
176 | if (rc) { | 176 | if (rc) { |
177 | if (rc != -ERESTARTSYS) | 177 | if (rc != -ERESTARTSYS) |
178 | dev_err(&base->cdev->dev, | 178 | pr_err("%s: Formatting unit %d failed with " |
179 | "Formatting unit %d failed with " | 179 | "rc=%d\n", dev_name(&base->cdev->dev), |
180 | "rc=%d\n", fdata->start_unit, rc); | 180 | fdata->start_unit, rc); |
181 | return rc; | 181 | return rc; |
182 | } | 182 | } |
183 | fdata->start_unit++; | 183 | fdata->start_unit++; |
@@ -204,9 +204,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp) | |||
204 | if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) | 204 | if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) |
205 | return -EFAULT; | 205 | return -EFAULT; |
206 | if (bdev != bdev->bd_contains) { | 206 | if (bdev != bdev->bd_contains) { |
207 | dev_warn(&block->base->cdev->dev, | 207 | pr_warning("%s: The specified DASD is a partition and cannot " |
208 | "The specified DASD is a partition and cannot be " | 208 | "be formatted\n", |
209 | "formatted\n"); | 209 | dev_name(&block->base->cdev->dev)); |
210 | return -EINVAL; | 210 | return -EINVAL; |
211 | } | 211 | } |
212 | return dasd_format(block, &fdata); | 212 | return dasd_format(block, &fdata); |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index db442cd6621e..ee604e92a5fa 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/suspend.h> | 42 | #include <linux/suspend.h> |
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
45 | #include <asm/checksum.h> | ||
46 | 45 | ||
47 | #define XPRAM_NAME "xpram" | 46 | #define XPRAM_NAME "xpram" |
48 | #define XPRAM_DEVS 1 /* one partition */ | 47 | #define XPRAM_DEVS 1 /* one partition */ |
@@ -51,7 +50,6 @@ | |||
51 | typedef struct { | 50 | typedef struct { |
52 | unsigned int size; /* size of xpram segment in pages */ | 51 | unsigned int size; /* size of xpram segment in pages */ |
53 | unsigned int offset; /* start page of xpram segment */ | 52 | unsigned int offset; /* start page of xpram segment */ |
54 | unsigned int csum; /* partition checksum for suspend */ | ||
55 | } xpram_device_t; | 53 | } xpram_device_t; |
56 | 54 | ||
57 | static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; | 55 | static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; |
@@ -387,58 +385,6 @@ out: | |||
387 | } | 385 | } |
388 | 386 | ||
389 | /* | 387 | /* |
390 | * Save checksums for all partitions. | ||
391 | */ | ||
392 | static int xpram_save_checksums(void) | ||
393 | { | ||
394 | unsigned long mem_page; | ||
395 | int rc, i; | ||
396 | |||
397 | rc = 0; | ||
398 | mem_page = (unsigned long) __get_free_page(GFP_KERNEL); | ||
399 | if (!mem_page) | ||
400 | return -ENOMEM; | ||
401 | for (i = 0; i < xpram_devs; i++) { | ||
402 | rc = xpram_page_in(mem_page, xpram_devices[i].offset); | ||
403 | if (rc) | ||
404 | goto fail; | ||
405 | xpram_devices[i].csum = csum_partial((const void *) mem_page, | ||
406 | PAGE_SIZE, 0); | ||
407 | } | ||
408 | fail: | ||
409 | free_page(mem_page); | ||
410 | return rc ? -ENXIO : 0; | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Verify checksums for all partitions. | ||
415 | */ | ||
416 | static int xpram_validate_checksums(void) | ||
417 | { | ||
418 | unsigned long mem_page; | ||
419 | unsigned int csum; | ||
420 | int rc, i; | ||
421 | |||
422 | rc = 0; | ||
423 | mem_page = (unsigned long) __get_free_page(GFP_KERNEL); | ||
424 | if (!mem_page) | ||
425 | return -ENOMEM; | ||
426 | for (i = 0; i < xpram_devs; i++) { | ||
427 | rc = xpram_page_in(mem_page, xpram_devices[i].offset); | ||
428 | if (rc) | ||
429 | goto fail; | ||
430 | csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0); | ||
431 | if (xpram_devices[i].csum != csum) { | ||
432 | rc = -EINVAL; | ||
433 | goto fail; | ||
434 | } | ||
435 | } | ||
436 | fail: | ||
437 | free_page(mem_page); | ||
438 | return rc ? -ENXIO : 0; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Resume failed: Print error message and call panic. | 388 | * Resume failed: Print error message and call panic. |
443 | */ | 389 | */ |
444 | static void xpram_resume_error(const char *message) | 390 | static void xpram_resume_error(const char *message) |
@@ -458,21 +404,10 @@ static int xpram_restore(struct device *dev) | |||
458 | xpram_resume_error("xpram disappeared"); | 404 | xpram_resume_error("xpram disappeared"); |
459 | if (xpram_pages != xpram_highest_page_index() + 1) | 405 | if (xpram_pages != xpram_highest_page_index() + 1) |
460 | xpram_resume_error("Size of xpram changed"); | 406 | xpram_resume_error("Size of xpram changed"); |
461 | if (xpram_validate_checksums()) | ||
462 | xpram_resume_error("Data of xpram changed"); | ||
463 | return 0; | 407 | return 0; |
464 | } | 408 | } |
465 | 409 | ||
466 | /* | ||
467 | * Save necessary state in suspend. | ||
468 | */ | ||
469 | static int xpram_freeze(struct device *dev) | ||
470 | { | ||
471 | return xpram_save_checksums(); | ||
472 | } | ||
473 | |||
474 | static struct dev_pm_ops xpram_pm_ops = { | 410 | static struct dev_pm_ops xpram_pm_ops = { |
475 | .freeze = xpram_freeze, | ||
476 | .restore = xpram_restore, | 411 | .restore = xpram_restore, |
477 | }; | 412 | }; |
478 | 413 | ||
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 0769ced52dbd..4e34d3686c23 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig | |||
@@ -82,6 +82,16 @@ config SCLP_CPI | |||
82 | You should only select this option if you know what you are doing, | 82 | You should only select this option if you know what you are doing, |
83 | need this feature and intend to run your kernel in LPAR. | 83 | need this feature and intend to run your kernel in LPAR. |
84 | 84 | ||
85 | config SCLP_ASYNC | ||
86 | tristate "Support for Call Home via Asynchronous SCLP Records" | ||
87 | depends on S390 | ||
88 | help | ||
89 | This option enables the call home function, which is able to inform | ||
90 | the service element and connected organisations about a kernel panic. | ||
91 | You should only select this option if you know what you are doing, | ||
92 | want to inform other people about your kernel panics, | ||
93 | need this feature and intend to run your kernel in LPAR. | ||
94 | |||
85 | config S390_TAPE | 95 | config S390_TAPE |
86 | tristate "S/390 tape device support" | 96 | tristate "S/390 tape device support" |
87 | depends on CCW | 97 | depends on CCW |
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 7e73e39a1741..efb500ab66c0 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -16,6 +16,7 @@ obj-$(CONFIG_SCLP_TTY) += sclp_tty.o | |||
16 | obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o | 16 | obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o |
17 | obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o | 17 | obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o |
18 | obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o | 18 | obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o |
19 | obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o | ||
19 | 20 | ||
20 | obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o | 21 | obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o |
21 | obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o | 22 | obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 3234e90bd7f9..89ece1c235aa 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -581,7 +581,7 @@ static int __init mon_init(void) | |||
581 | monreader_device->release = (void (*)(struct device *))kfree; | 581 | monreader_device->release = (void (*)(struct device *))kfree; |
582 | rc = device_register(monreader_device); | 582 | rc = device_register(monreader_device); |
583 | if (rc) { | 583 | if (rc) { |
584 | kfree(monreader_device); | 584 | put_device(monreader_device); |
585 | goto out_driver; | 585 | goto out_driver; |
586 | } | 586 | } |
587 | 587 | ||
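On the monreader.c fix (the same convention appears in the vmlogrdr.c and chp.c hunks below): once device_register() has been attempted, the embedded kobject is live, so on failure the device must be dropped with put_device(), which invokes the device's ->release() callback (kfree here); freeing the structure directly would bypass the kobject teardown. Generic shape of the convention (sketch only, my_release is a placeholder):

	dev->release = my_release;		/* hypothetical release callback */
	rc = device_register(dev);
	if (rc) {
		put_device(dev);		/* ->release() does the freeing */
		return rc;
	}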
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 60e7cb07095b..6bb5a6bdfab5 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #define EVTYP_VT220MSG 0x1A | 27 | #define EVTYP_VT220MSG 0x1A |
28 | #define EVTYP_CONFMGMDATA 0x04 | 28 | #define EVTYP_CONFMGMDATA 0x04 |
29 | #define EVTYP_SDIAS 0x1C | 29 | #define EVTYP_SDIAS 0x1C |
30 | #define EVTYP_ASYNC 0x0A | ||
30 | 31 | ||
31 | #define EVTYP_OPCMD_MASK 0x80000000 | 32 | #define EVTYP_OPCMD_MASK 0x80000000 |
32 | #define EVTYP_MSG_MASK 0x40000000 | 33 | #define EVTYP_MSG_MASK 0x40000000 |
@@ -38,6 +39,7 @@ | |||
38 | #define EVTYP_VT220MSG_MASK 0x00000040 | 39 | #define EVTYP_VT220MSG_MASK 0x00000040 |
39 | #define EVTYP_CONFMGMDATA_MASK 0x10000000 | 40 | #define EVTYP_CONFMGMDATA_MASK 0x10000000 |
40 | #define EVTYP_SDIAS_MASK 0x00000010 | 41 | #define EVTYP_SDIAS_MASK 0x00000010 |
42 | #define EVTYP_ASYNC_MASK 0x00400000 | ||
41 | 43 | ||
42 | #define GNRLMSGFLGS_DOM 0x8000 | 44 | #define GNRLMSGFLGS_DOM 0x8000 |
43 | #define GNRLMSGFLGS_SNDALRM 0x4000 | 45 | #define GNRLMSGFLGS_SNDALRM 0x4000 |
@@ -85,12 +87,12 @@ struct sccb_header { | |||
85 | } __attribute__((packed)); | 87 | } __attribute__((packed)); |
86 | 88 | ||
87 | extern u64 sclp_facilities; | 89 | extern u64 sclp_facilities; |
88 | |||
89 | #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) | 90 | #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) |
90 | #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) | 91 | #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) |
91 | #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) | 92 | #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) |
92 | #define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) | 93 | #define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) |
93 | 94 | ||
95 | |||
94 | struct gds_subvector { | 96 | struct gds_subvector { |
95 | u8 length; | 97 | u8 length; |
96 | u8 key; | 98 | u8 key; |
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c new file mode 100644 index 000000000000..daaec185ed36 --- /dev/null +++ b/drivers/s390/char/sclp_async.c | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * Enable Asynchronous Notification via SCLP. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/device.h> | ||
12 | #include <linux/stat.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/ctype.h> | ||
15 | #include <linux/kmod.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/proc_fs.h> | ||
19 | #include <linux/sysctl.h> | ||
20 | #include <linux/utsname.h> | ||
21 | #include "sclp.h" | ||
22 | |||
23 | static int callhome_enabled; | ||
24 | static struct sclp_req *request; | ||
25 | static struct sclp_async_sccb *sccb; | ||
26 | static int sclp_async_send_wait(char *message); | ||
27 | static struct ctl_table_header *callhome_sysctl_header; | ||
28 | static DEFINE_SPINLOCK(sclp_async_lock); | ||
29 | static char nodename[64]; | ||
30 | #define SCLP_NORMAL_WRITE 0x00 | ||
31 | |||
32 | struct async_evbuf { | ||
33 | struct evbuf_header header; | ||
34 | u64 reserved; | ||
35 | u8 rflags; | ||
36 | u8 empty; | ||
37 | u8 rtype; | ||
38 | u8 otype; | ||
39 | char comp_id[12]; | ||
40 | char data[3000]; /* there is still some space left */ | ||
41 | } __attribute__((packed)); | ||
42 | |||
43 | struct sclp_async_sccb { | ||
44 | struct sccb_header header; | ||
45 | struct async_evbuf evbuf; | ||
46 | } __attribute__((packed)); | ||
47 | |||
48 | static struct sclp_register sclp_async_register = { | ||
49 | .send_mask = EVTYP_ASYNC_MASK, | ||
50 | }; | ||
51 | |||
52 | static int call_home_on_panic(struct notifier_block *self, | ||
53 | unsigned long event, void *data) | ||
54 | { | ||
55 | strncat(data, nodename, strlen(nodename)); | ||
56 | sclp_async_send_wait(data); | ||
57 | return NOTIFY_DONE; | ||
58 | } | ||
59 | |||
60 | static struct notifier_block call_home_panic_nb = { | ||
61 | .notifier_call = call_home_on_panic, | ||
62 | .priority = INT_MAX, | ||
63 | }; | ||
64 | |||
65 | static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp, | ||
66 | void __user *buffer, size_t *count, | ||
67 | loff_t *ppos) | ||
68 | { | ||
69 | unsigned long val; | ||
70 | int len, rc; | ||
71 | char buf[2]; | ||
72 | |||
73 | if (!*count | (*ppos && !write)) { | ||
74 | *count = 0; | ||
75 | return 0; | ||
76 | } | ||
77 | if (!write) { | ||
78 | len = sprintf(buf, "%d\n", callhome_enabled); | ||
79 | buf[len] = '\0'; | ||
80 | rc = copy_to_user(buffer, buf, sizeof(buf)); | ||
81 | if (rc != 0) | ||
82 | return -EFAULT; | ||
83 | } else { | ||
84 | len = *count; | ||
85 | rc = copy_from_user(buf, buffer, sizeof(buf)); | ||
86 | if (rc != 0) | ||
87 | return -EFAULT; | ||
88 | if (strict_strtoul(buf, 0, &val) != 0) | ||
89 | return -EINVAL; | ||
90 | if (val != 0 && val != 1) | ||
91 | return -EINVAL; | ||
92 | callhome_enabled = val; | ||
93 | } | ||
94 | *count = len; | ||
95 | *ppos += len; | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static struct ctl_table callhome_table[] = { | ||
100 | { | ||
101 | .procname = "callhome", | ||
102 | .mode = 0644, | ||
103 | .proc_handler = &proc_handler_callhome, | ||
104 | }, | ||
105 | { .ctl_name = 0 } | ||
106 | }; | ||
107 | |||
108 | static struct ctl_table kern_dir_table[] = { | ||
109 | { | ||
110 | .ctl_name = CTL_KERN, | ||
111 | .procname = "kernel", | ||
112 | .maxlen = 0, | ||
113 | .mode = 0555, | ||
114 | .child = callhome_table, | ||
115 | }, | ||
116 | { .ctl_name = 0 } | ||
117 | }; | ||
118 | |||
119 | /* | ||
120 | * Function used to transfer asynchronous notification | ||
121 | * records which waits for send completion | ||
122 | */ | ||
123 | static int sclp_async_send_wait(char *message) | ||
124 | { | ||
125 | struct async_evbuf *evb; | ||
126 | int rc; | ||
127 | unsigned long flags; | ||
128 | |||
129 | if (!callhome_enabled) | ||
130 | return 0; | ||
131 | sccb->evbuf.header.type = EVTYP_ASYNC; | ||
132 | sccb->evbuf.rtype = 0xA5; | ||
133 | sccb->evbuf.otype = 0x00; | ||
134 | evb = &sccb->evbuf; | ||
135 | request->command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
136 | request->sccb = sccb; | ||
137 | request->status = SCLP_REQ_FILLED; | ||
138 | strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data)); | ||
139 | /* | ||
140 | * Retain Queue | ||
141 | * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS) | ||
142 | */ | ||
143 | strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id)); | ||
144 | sccb->evbuf.header.length = sizeof(sccb->evbuf); | ||
145 | sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header); | ||
146 | sccb->header.function_code = SCLP_NORMAL_WRITE; | ||
147 | rc = sclp_add_request(request); | ||
148 | if (rc) | ||
149 | return rc; | ||
150 | spin_lock_irqsave(&sclp_async_lock, flags); | ||
151 | while (request->status != SCLP_REQ_DONE && | ||
152 | request->status != SCLP_REQ_FAILED) { | ||
153 | sclp_sync_wait(); | ||
154 | } | ||
155 | spin_unlock_irqrestore(&sclp_async_lock, flags); | ||
156 | if (request->status != SCLP_REQ_DONE) | ||
157 | return -EIO; | ||
158 | rc = ((struct sclp_async_sccb *) | ||
159 | request->sccb)->header.response_code; | ||
160 | if (rc != 0x0020) | ||
161 | return -EIO; | ||
162 | if (evb->header.flags != 0x80) | ||
163 | return -EIO; | ||
164 | return rc; | ||
165 | } | ||
166 | |||
167 | static int __init sclp_async_init(void) | ||
168 | { | ||
169 | int rc; | ||
170 | |||
171 | rc = sclp_register(&sclp_async_register); | ||
172 | if (rc) | ||
173 | return rc; | ||
174 | callhome_sysctl_header = register_sysctl_table(kern_dir_table); | ||
175 | if (!callhome_sysctl_header) { | ||
176 | rc = -ENOMEM; | ||
177 | goto out_sclp; | ||
178 | } | ||
179 | if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) { | ||
180 | rc = -EOPNOTSUPP; | ||
181 | goto out_sclp; | ||
182 | } | ||
183 | rc = -ENOMEM; | ||
184 | request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); | ||
185 | if (!request) | ||
186 | goto out_sys; | ||
187 | sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
188 | if (!sccb) | ||
189 | goto out_mem; | ||
190 | rc = atomic_notifier_chain_register(&panic_notifier_list, | ||
191 | &call_home_panic_nb); | ||
192 | if (rc) | ||
193 | goto out_mem; | ||
194 | |||
195 | strncpy(nodename, init_utsname()->nodename, 64); | ||
196 | return 0; | ||
197 | |||
198 | out_mem: | ||
199 | kfree(request); | ||
200 | free_page((unsigned long) sccb); | ||
201 | out_sys: | ||
202 | unregister_sysctl_table(callhome_sysctl_header); | ||
203 | out_sclp: | ||
204 | sclp_unregister(&sclp_async_register); | ||
205 | return rc; | ||
206 | |||
207 | } | ||
208 | module_init(sclp_async_init); | ||
209 | |||
210 | static void __exit sclp_async_exit(void) | ||
211 | { | ||
212 | atomic_notifier_chain_unregister(&panic_notifier_list, | ||
213 | &call_home_panic_nb); | ||
214 | unregister_sysctl_table(callhome_sysctl_header); | ||
215 | sclp_unregister(&sclp_async_register); | ||
216 | free_page((unsigned long) sccb); | ||
217 | kfree(request); | ||
218 | } | ||
219 | module_exit(sclp_async_exit); | ||
220 | |||
221 | MODULE_AUTHOR("Copyright IBM Corp. 2009"); | ||
222 | MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>"); | ||
223 | MODULE_LICENSE("GPL"); | ||
224 | MODULE_DESCRIPTION("SCLP Asynchronous Notification Records"); | ||
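The new sclp_async.c registers its on/off switch through kern_dir_table/callhome_table, so it should surface as /proc/sys/kernel/callhome (an assumption drawn from the table definitions above, not stated elsewhere in the patch). A hedged userspace sketch for enabling it:

	#include <stdio.h>

	int main(void)
	{
		/* path assumed from the sysctl tables above; needs root */
		FILE *f = fopen("/proc/sys/kernel/callhome", "w");

		if (!f) {
			perror("callhome sysctl not available");
			return 1;
		}
		fputs("1\n", f);	/* 1 = call home on panic, 0 = disabled */
		fclose(f);
		return 0;
	}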
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 5a519fac37b7..2fe45ff77b75 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "tape" | 11 | #define KMSG_COMPONENT "tape_34xx" |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 418f72dd39b4..e4cc3aae9162 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "tape" | 11 | #define KMSG_COMPONENT "tape_3590" |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -39,8 +39,6 @@ EXPORT_SYMBOL(TAPE_DBF_AREA); | |||
39 | * - Read Alternate: implemented | 39 | * - Read Alternate: implemented |
40 | *******************************************************************/ | 40 | *******************************************************************/ |
41 | 41 | ||
42 | #define KMSG_COMPONENT "tape" | ||
43 | |||
44 | static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { | 42 | static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { |
45 | [0x00] = "", | 43 | [0x00] = "", |
46 | [0x10] = "Lost Sense", | 44 | [0x10] = "Lost Sense", |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 47ff695255ea..4cb9e70507ab 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -302,8 +302,6 @@ tapeblock_revalidate_disk(struct gendisk *disk) | |||
302 | if (!device->blk_data.medium_changed) | 302 | if (!device->blk_data.medium_changed) |
303 | return 0; | 303 | return 0; |
304 | 304 | ||
305 | dev_info(&device->cdev->dev, "Determining the size of the recorded " | ||
306 | "area...\n"); | ||
307 | rc = tape_mtop(device, MTFSFM, 1); | 305 | rc = tape_mtop(device, MTFSFM, 1); |
308 | if (rc) | 306 | if (rc) |
309 | return rc; | 307 | return rc; |
@@ -312,6 +310,8 @@ tapeblock_revalidate_disk(struct gendisk *disk) | |||
312 | if (rc < 0) | 310 | if (rc < 0) |
313 | return rc; | 311 | return rc; |
314 | 312 | ||
313 | pr_info("%s: Determining the size of the recorded area...\n", | ||
314 | dev_name(&device->cdev->dev)); | ||
315 | DBF_LH(3, "Image file ends at %d\n", rc); | 315 | DBF_LH(3, "Image file ends at %d\n", rc); |
316 | nr_of_blks = rc; | 316 | nr_of_blks = rc; |
317 | 317 | ||
@@ -330,8 +330,8 @@ tapeblock_revalidate_disk(struct gendisk *disk) | |||
330 | device->bof = rc; | 330 | device->bof = rc; |
331 | nr_of_blks -= rc; | 331 | nr_of_blks -= rc; |
332 | 332 | ||
333 | dev_info(&device->cdev->dev, "The size of the recorded area is %i " | 333 | pr_info("%s: The size of the recorded area is %i blocks\n", |
334 | "blocks\n", nr_of_blks); | 334 | dev_name(&device->cdev->dev), nr_of_blks); |
335 | set_capacity(device->blk_data.disk, | 335 | set_capacity(device->blk_data.disk, |
336 | nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); | 336 | nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); |
337 | 337 | ||
@@ -366,8 +366,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode) | |||
366 | 366 | ||
367 | if (device->required_tapemarks) { | 367 | if (device->required_tapemarks) { |
368 | DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); | 368 | DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); |
369 | dev_warn(&device->cdev->dev, "Opening the tape failed because" | 369 | pr_warning("%s: Opening the tape failed because of missing " |
370 | " of missing end-of-file marks\n"); | 370 | "end-of-file marks\n", dev_name(&device->cdev->dev)); |
371 | rc = -EPERM; | 371 | rc = -EPERM; |
372 | goto put_device; | 372 | goto put_device; |
373 | } | 373 | } |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 1d420d947596..5cd31e071647 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -214,13 +214,15 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) | |||
214 | switch(newstate){ | 214 | switch(newstate){ |
215 | case MS_UNLOADED: | 215 | case MS_UNLOADED: |
216 | device->tape_generic_status |= GMT_DR_OPEN(~0); | 216 | device->tape_generic_status |= GMT_DR_OPEN(~0); |
217 | dev_info(&device->cdev->dev, "The tape cartridge has been " | 217 | if (device->medium_state == MS_LOADED) |
218 | "successfully unloaded\n"); | 218 | pr_info("%s: The tape cartridge has been successfully " |
219 | "unloaded\n", dev_name(&device->cdev->dev)); | ||
219 | break; | 220 | break; |
220 | case MS_LOADED: | 221 | case MS_LOADED: |
221 | device->tape_generic_status &= ~GMT_DR_OPEN(~0); | 222 | device->tape_generic_status &= ~GMT_DR_OPEN(~0); |
222 | dev_info(&device->cdev->dev, "A tape cartridge has been " | 223 | if (device->medium_state == MS_UNLOADED) |
223 | "mounted\n"); | 224 | pr_info("%s: A tape cartridge has been mounted\n", |
225 | dev_name(&device->cdev->dev)); | ||
224 | break; | 226 | break; |
225 | default: | 227 | default: |
226 | // print nothing | 228 | // print nothing |
@@ -358,11 +360,11 @@ tape_generic_online(struct tape_device *device, | |||
358 | 360 | ||
359 | out_char: | 361 | out_char: |
360 | tapechar_cleanup_device(device); | 362 | tapechar_cleanup_device(device); |
363 | out_minor: | ||
364 | tape_remove_minor(device); | ||
361 | out_discipline: | 365 | out_discipline: |
362 | device->discipline->cleanup_device(device); | 366 | device->discipline->cleanup_device(device); |
363 | device->discipline = NULL; | 367 | device->discipline = NULL; |
364 | out_minor: | ||
365 | tape_remove_minor(device); | ||
366 | out: | 368 | out: |
367 | module_put(discipline->owner); | 369 | module_put(discipline->owner); |
368 | return rc; | 370 | return rc; |
@@ -654,8 +656,8 @@ tape_generic_remove(struct ccw_device *cdev) | |||
654 | */ | 656 | */ |
655 | DBF_EVENT(3, "(%08x): Drive in use vanished!\n", | 657 | DBF_EVENT(3, "(%08x): Drive in use vanished!\n", |
656 | device->cdev_id); | 658 | device->cdev_id); |
657 | dev_warn(&device->cdev->dev, "A tape unit was detached" | 659 | pr_warning("%s: A tape unit was detached while in " |
658 | " while in use\n"); | 660 | "use\n", dev_name(&device->cdev->dev)); |
659 | tape_state_set(device, TS_NOT_OPER); | 661 | tape_state_set(device, TS_NOT_OPER); |
660 | __tape_discard_requests(device); | 662 | __tape_discard_requests(device); |
661 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 663 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 1a9420ba518d..750354ad16e5 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c | |||
@@ -68,7 +68,7 @@ tape_std_assign(struct tape_device *device) | |||
68 | * to another host (actually this shouldn't happen but it does). | 68 | * to another host (actually this shouldn't happen but it does). |
69 | * So we set up a timeout for this call. | 69 | * So we set up a timeout for this call. |
70 | */ | 70 | */ |
71 | init_timer(&timeout); | 71 | init_timer_on_stack(&timeout); |
72 | timeout.function = tape_std_assign_timeout; | 72 | timeout.function = tape_std_assign_timeout; |
73 | timeout.data = (unsigned long) request; | 73 | timeout.data = (unsigned long) request; |
74 | timeout.expires = jiffies + 2 * HZ; | 74 | timeout.expires = jiffies + 2 * HZ; |
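On the tape_std.c change: the timeout timer is a local variable, so it lives on the stack and, at least with CONFIG_DEBUG_OBJECTS_TIMERS, must be set up with init_timer_on_stack() rather than plain init_timer(). A hedged sketch of the full on-stack pattern as it looked in this kernel generation (the my_* names are placeholders; the patch itself only changes the init call):

	static void my_timeout(unsigned long data)	/* placeholder callback */
	{
		/* abort or wake up whatever is waiting */
	}

	static void wait_with_timeout(unsigned long cookie)
	{
		struct timer_list timeout;		/* on the stack */

		init_timer_on_stack(&timeout);
		timeout.function = my_timeout;
		timeout.data = cookie;
		timeout.expires = jiffies + 2 * HZ;
		add_timer(&timeout);

		/* ... wait for the I/O or the timeout ... */

		del_timer_sync(&timeout);
		destroy_timer_on_stack(&timeout);	/* pairs with the *_on_stack init */
	}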
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index c20a4fe6da51..d1a142fa3eb4 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -765,8 +765,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) | |||
765 | } else | 765 | } else |
766 | return -ENOMEM; | 766 | return -ENOMEM; |
767 | ret = device_register(dev); | 767 | ret = device_register(dev); |
768 | if (ret) | 768 | if (ret) { |
769 | put_device(dev); | ||
769 | return ret; | 770 | return ret; |
771 | } | ||
770 | 772 | ||
771 | ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group); | 773 | ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group); |
772 | if (ret) { | 774 | if (ret) { |
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 31b902e94f7b..77571b68539a 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -1026,9 +1026,15 @@ static int __init ur_init(void) | |||
1026 | 1026 | ||
1027 | debug_set_level(vmur_dbf, 6); | 1027 | debug_set_level(vmur_dbf, 6); |
1028 | 1028 | ||
1029 | vmur_class = class_create(THIS_MODULE, "vmur"); | ||
1030 | if (IS_ERR(vmur_class)) { | ||
1031 | rc = PTR_ERR(vmur_class); | ||
1032 | goto fail_free_dbf; | ||
1033 | } | ||
1034 | |||
1029 | rc = ccw_driver_register(&ur_driver); | 1035 | rc = ccw_driver_register(&ur_driver); |
1030 | if (rc) | 1036 | if (rc) |
1031 | goto fail_free_dbf; | 1037 | goto fail_class_destroy; |
1032 | 1038 | ||
1033 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); | 1039 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); |
1034 | if (rc) { | 1040 | if (rc) { |
@@ -1038,18 +1044,13 @@ static int __init ur_init(void) | |||
1038 | } | 1044 | } |
1039 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); | 1045 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); |
1040 | 1046 | ||
1041 | vmur_class = class_create(THIS_MODULE, "vmur"); | ||
1042 | if (IS_ERR(vmur_class)) { | ||
1043 | rc = PTR_ERR(vmur_class); | ||
1044 | goto fail_unregister_region; | ||
1045 | } | ||
1046 | pr_info("%s loaded.\n", ur_banner); | 1047 | pr_info("%s loaded.\n", ur_banner); |
1047 | return 0; | 1048 | return 0; |
1048 | 1049 | ||
1049 | fail_unregister_region: | ||
1050 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); | ||
1051 | fail_unregister_driver: | 1050 | fail_unregister_driver: |
1052 | ccw_driver_unregister(&ur_driver); | 1051 | ccw_driver_unregister(&ur_driver); |
1052 | fail_class_destroy: | ||
1053 | class_destroy(vmur_class); | ||
1053 | fail_free_dbf: | 1054 | fail_free_dbf: |
1054 | debug_unregister(vmur_dbf); | 1055 | debug_unregister(vmur_dbf); |
1055 | return rc; | 1056 | return rc; |
@@ -1057,9 +1058,9 @@ fail_free_dbf: | |||
1057 | 1058 | ||
1058 | static void __exit ur_exit(void) | 1059 | static void __exit ur_exit(void) |
1059 | { | 1060 | { |
1060 | class_destroy(vmur_class); | ||
1061 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); | 1061 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); |
1062 | ccw_driver_unregister(&ur_driver); | 1062 | ccw_driver_unregister(&ur_driver); |
1063 | class_destroy(vmur_class); | ||
1063 | debug_unregister(vmur_dbf); | 1064 | debug_unregister(vmur_dbf); |
1064 | pr_info("%s unloaded.\n", ur_banner); | 1065 | pr_info("%s unloaded.\n", ur_banner); |
1065 | } | 1066 | } |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1bbae433fbd8..c431198bdbc4 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -275,7 +275,7 @@ struct zcore_header { | |||
275 | u32 num_pages; | 275 | u32 num_pages; |
276 | u32 pad1; | 276 | u32 pad1; |
277 | u64 tod; | 277 | u64 tod; |
278 | cpuid_t cpu_id; | 278 | struct cpuid cpu_id; |
279 | u32 arch_id; | 279 | u32 arch_id; |
280 | u32 volnr; | 280 | u32 volnr; |
281 | u32 build_arch; | 281 | u32 build_arch; |
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index adb3dd301528..fa4c9662f65e 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the S/390 common i/o drivers | 2 | # Makefile for the S/390 common i/o drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ | 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ |
6 | fcx.o itcw.o crw.o | 6 | fcx.o itcw.o crw.o |
7 | ccw_device-objs += device.o device_fsm.o device_ops.o | 7 | ccw_device-objs += device.o device_fsm.o device_ops.o |
8 | ccw_device-objs += device_id.o device_pgid.o device_status.o | 8 | ccw_device-objs += device_id.o device_pgid.o device_status.o |
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 3e5f304ad88f..40002830d48a 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
@@ -417,7 +417,8 @@ int chp_new(struct chp_id chpid) | |||
417 | if (ret) { | 417 | if (ret) { |
418 | CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n", | 418 | CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n", |
419 | chpid.cssid, chpid.id, ret); | 419 | chpid.cssid, chpid.id, ret); |
420 | goto out_free; | 420 | put_device(&chp->dev); |
421 | goto out; | ||
421 | } | 422 | } |
422 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | 423 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); |
423 | if (ret) { | 424 | if (ret) { |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 425e8f89a6c5..37aa611d4ac5 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -37,29 +37,6 @@ struct channel_path_desc { | |||
37 | 37 | ||
38 | struct channel_path; | 38 | struct channel_path; |
39 | 39 | ||
40 | struct css_general_char { | ||
41 | u64 : 12; | ||
42 | u32 dynio : 1; /* bit 12 */ | ||
43 | u32 : 28; | ||
44 | u32 aif : 1; /* bit 41 */ | ||
45 | u32 : 3; | ||
46 | u32 mcss : 1; /* bit 45 */ | ||
47 | u32 fcs : 1; /* bit 46 */ | ||
48 | u32 : 1; | ||
49 | u32 ext_mb : 1; /* bit 48 */ | ||
50 | u32 : 7; | ||
51 | u32 aif_tdd : 1; /* bit 56 */ | ||
52 | u32 : 1; | ||
53 | u32 qebsm : 1; /* bit 58 */ | ||
54 | u32 : 8; | ||
55 | u32 aif_osa : 1; /* bit 67 */ | ||
56 | u32 : 14; | ||
57 | u32 cib : 1; /* bit 82 */ | ||
58 | u32 : 5; | ||
59 | u32 fcx : 1; /* bit 88 */ | ||
60 | u32 : 7; | ||
61 | }__attribute__((packed)); | ||
62 | |||
63 | struct css_chsc_char { | 40 | struct css_chsc_char { |
64 | u64 res; | 41 | u64 res; |
65 | u64 : 20; | 42 | u64 : 20; |
@@ -72,7 +49,6 @@ struct css_chsc_char { | |||
72 | u32 : 19; | 49 | u32 : 19; |
73 | }__attribute__((packed)); | 50 | }__attribute__((packed)); |
74 | 51 | ||
75 | extern struct css_general_char css_general_characteristics; | ||
76 | extern struct css_chsc_char css_chsc_characteristics; | 52 | extern struct css_chsc_char css_chsc_characteristics; |
77 | 53 | ||
78 | struct chsc_ssd_info { | 54 | struct chsc_ssd_info { |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 5ec7789bd9d8..138124fcfcad 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -139,12 +139,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
139 | __u8 lpm, /* logical path mask */ | 139 | __u8 lpm, /* logical path mask */ |
140 | __u8 key) /* storage key */ | 140 | __u8 key) /* storage key */ |
141 | { | 141 | { |
142 | char dbf_txt[15]; | ||
143 | int ccode; | 142 | int ccode; |
144 | union orb *orb; | 143 | union orb *orb; |
145 | 144 | ||
146 | CIO_TRACE_EVENT(4, "stIO"); | 145 | CIO_TRACE_EVENT(5, "stIO"); |
147 | CIO_TRACE_EVENT(4, dev_name(&sch->dev)); | 146 | CIO_TRACE_EVENT(5, dev_name(&sch->dev)); |
148 | 147 | ||
149 | orb = &to_io_private(sch)->orb; | 148 | orb = &to_io_private(sch)->orb; |
150 | memset(orb, 0, sizeof(union orb)); | 149 | memset(orb, 0, sizeof(union orb)); |
@@ -169,8 +168,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
169 | ccode = ssch(sch->schid, orb); | 168 | ccode = ssch(sch->schid, orb); |
170 | 169 | ||
171 | /* process condition code */ | 170 | /* process condition code */ |
172 | sprintf(dbf_txt, "ccode:%d", ccode); | 171 | CIO_HEX_EVENT(5, &ccode, sizeof(ccode)); |
173 | CIO_TRACE_EVENT(4, dbf_txt); | ||
174 | 172 | ||
175 | switch (ccode) { | 173 | switch (ccode) { |
176 | case 0: | 174 | case 0: |
@@ -201,16 +199,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) | |||
201 | int | 199 | int |
202 | cio_resume (struct subchannel *sch) | 200 | cio_resume (struct subchannel *sch) |
203 | { | 201 | { |
204 | char dbf_txt[15]; | ||
205 | int ccode; | 202 | int ccode; |
206 | 203 | ||
207 | CIO_TRACE_EVENT (4, "resIO"); | 204 | CIO_TRACE_EVENT(4, "resIO"); |
208 | CIO_TRACE_EVENT(4, dev_name(&sch->dev)); | 205 | CIO_TRACE_EVENT(4, dev_name(&sch->dev)); |
209 | 206 | ||
210 | ccode = rsch (sch->schid); | 207 | ccode = rsch (sch->schid); |
211 | 208 | ||
212 | sprintf (dbf_txt, "ccode:%d", ccode); | 209 | CIO_HEX_EVENT(4, &ccode, sizeof(ccode)); |
213 | CIO_TRACE_EVENT (4, dbf_txt); | ||
214 | 210 | ||
215 | switch (ccode) { | 211 | switch (ccode) { |
216 | case 0: | 212 | case 0: |
@@ -235,13 +231,12 @@ cio_resume (struct subchannel *sch) | |||
235 | int | 231 | int |
236 | cio_halt(struct subchannel *sch) | 232 | cio_halt(struct subchannel *sch) |
237 | { | 233 | { |
238 | char dbf_txt[15]; | ||
239 | int ccode; | 234 | int ccode; |
240 | 235 | ||
241 | if (!sch) | 236 | if (!sch) |
242 | return -ENODEV; | 237 | return -ENODEV; |
243 | 238 | ||
244 | CIO_TRACE_EVENT (2, "haltIO"); | 239 | CIO_TRACE_EVENT(2, "haltIO"); |
245 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 240 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
246 | 241 | ||
247 | /* | 242 | /* |
@@ -249,8 +244,7 @@ cio_halt(struct subchannel *sch) | |||
249 | */ | 244 | */ |
250 | ccode = hsch (sch->schid); | 245 | ccode = hsch (sch->schid); |
251 | 246 | ||
252 | sprintf (dbf_txt, "ccode:%d", ccode); | 247 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); |
253 | CIO_TRACE_EVENT (2, dbf_txt); | ||
254 | 248 | ||
255 | switch (ccode) { | 249 | switch (ccode) { |
256 | case 0: | 250 | case 0: |
@@ -270,13 +264,12 @@ cio_halt(struct subchannel *sch) | |||
270 | int | 264 | int |
271 | cio_clear(struct subchannel *sch) | 265 | cio_clear(struct subchannel *sch) |
272 | { | 266 | { |
273 | char dbf_txt[15]; | ||
274 | int ccode; | 267 | int ccode; |
275 | 268 | ||
276 | if (!sch) | 269 | if (!sch) |
277 | return -ENODEV; | 270 | return -ENODEV; |
278 | 271 | ||
279 | CIO_TRACE_EVENT (2, "clearIO"); | 272 | CIO_TRACE_EVENT(2, "clearIO"); |
280 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 273 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
281 | 274 | ||
282 | /* | 275 | /* |
@@ -284,8 +277,7 @@ cio_clear(struct subchannel *sch) | |||
284 | */ | 277 | */ |
285 | ccode = csch (sch->schid); | 278 | ccode = csch (sch->schid); |
286 | 279 | ||
287 | sprintf (dbf_txt, "ccode:%d", ccode); | 280 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); |
288 | CIO_TRACE_EVENT (2, dbf_txt); | ||
289 | 281 | ||
290 | switch (ccode) { | 282 | switch (ccode) { |
291 | case 0: | 283 | case 0: |
@@ -306,19 +298,17 @@ cio_clear(struct subchannel *sch) | |||
306 | int | 298 | int |
307 | cio_cancel (struct subchannel *sch) | 299 | cio_cancel (struct subchannel *sch) |
308 | { | 300 | { |
309 | char dbf_txt[15]; | ||
310 | int ccode; | 301 | int ccode; |
311 | 302 | ||
312 | if (!sch) | 303 | if (!sch) |
313 | return -ENODEV; | 304 | return -ENODEV; |
314 | 305 | ||
315 | CIO_TRACE_EVENT (2, "cancelIO"); | 306 | CIO_TRACE_EVENT(2, "cancelIO"); |
316 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 307 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
317 | 308 | ||
318 | ccode = xsch (sch->schid); | 309 | ccode = xsch (sch->schid); |
319 | 310 | ||
320 | sprintf (dbf_txt, "ccode:%d", ccode); | 311 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); |
321 | CIO_TRACE_EVENT (2, dbf_txt); | ||
322 | 312 | ||
323 | switch (ccode) { | 313 | switch (ccode) { |
324 | case 0: /* success */ | 314 | case 0: /* success */ |
@@ -429,11 +419,10 @@ EXPORT_SYMBOL_GPL(cio_update_schib); | |||
429 | */ | 419 | */ |
430 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 420 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
431 | { | 421 | { |
432 | char dbf_txt[15]; | ||
433 | int retry; | 422 | int retry; |
434 | int ret; | 423 | int ret; |
435 | 424 | ||
436 | CIO_TRACE_EVENT (2, "ensch"); | 425 | CIO_TRACE_EVENT(2, "ensch"); |
437 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 426 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
438 | 427 | ||
439 | if (sch_is_pseudo_sch(sch)) | 428 | if (sch_is_pseudo_sch(sch)) |
@@ -460,8 +449,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | |||
460 | } else | 449 | } else |
461 | break; | 450 | break; |
462 | } | 451 | } |
463 | sprintf (dbf_txt, "ret:%d", ret); | 452 | CIO_HEX_EVENT(2, &ret, sizeof(ret)); |
464 | CIO_TRACE_EVENT (2, dbf_txt); | ||
465 | return ret; | 453 | return ret; |
466 | } | 454 | } |
467 | EXPORT_SYMBOL_GPL(cio_enable_subchannel); | 455 | EXPORT_SYMBOL_GPL(cio_enable_subchannel); |
@@ -472,11 +460,10 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); | |||
472 | */ | 460 | */ |
473 | int cio_disable_subchannel(struct subchannel *sch) | 461 | int cio_disable_subchannel(struct subchannel *sch) |
474 | { | 462 | { |
475 | char dbf_txt[15]; | ||
476 | int retry; | 463 | int retry; |
477 | int ret; | 464 | int ret; |
478 | 465 | ||
479 | CIO_TRACE_EVENT (2, "dissch"); | 466 | CIO_TRACE_EVENT(2, "dissch"); |
480 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 467 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
481 | 468 | ||
482 | if (sch_is_pseudo_sch(sch)) | 469 | if (sch_is_pseudo_sch(sch)) |
@@ -495,8 +482,7 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
495 | } else | 482 | } else |
496 | break; | 483 | break; |
497 | } | 484 | } |
498 | sprintf (dbf_txt, "ret:%d", ret); | 485 | CIO_HEX_EVENT(2, &ret, sizeof(ret)); |
499 | CIO_TRACE_EVENT (2, dbf_txt); | ||
500 | return ret; | 486 | return ret; |
501 | } | 487 | } |
502 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); | 488 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); |
@@ -578,11 +564,6 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) | |||
578 | goto out; | 564 | goto out; |
579 | } | 565 | } |
580 | mutex_init(&sch->reg_mutex); | 566 | mutex_init(&sch->reg_mutex); |
581 | /* Set a name for the subchannel */ | ||
582 | if (cio_is_console(schid)) | ||
583 | sch->dev.init_name = cio_get_console_sch_name(schid); | ||
584 | else | ||
585 | dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no); | ||
586 | 567 | ||
587 | /* | 568 | /* |
588 | * The first subchannel that is not-operational (ccode==3) | 569 | * The first subchannel that is not-operational (ccode==3) |
@@ -686,7 +667,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs) | |||
686 | 667 | ||
687 | #ifdef CONFIG_CCW_CONSOLE | 668 | #ifdef CONFIG_CCW_CONSOLE |
688 | static struct subchannel console_subchannel; | 669 | static struct subchannel console_subchannel; |
689 | static char console_sch_name[10] = "0.x.xxxx"; | ||
690 | static struct io_subchannel_private console_priv; | 670 | static struct io_subchannel_private console_priv; |
691 | static int console_subchannel_in_use; | 671 | static int console_subchannel_in_use; |
692 | 672 | ||
@@ -873,12 +853,6 @@ cio_get_console_subchannel(void) | |||
873 | return &console_subchannel; | 853 | return &console_subchannel; |
874 | } | 854 | } |
875 | 855 | ||
876 | const char *cio_get_console_sch_name(struct subchannel_id schid) | ||
877 | { | ||
878 | snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no); | ||
879 | return (const char *)console_sch_name; | ||
880 | } | ||
881 | |||
882 | #endif | 856 | #endif |
883 | static int | 857 | static int |
884 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | 858 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 5150fba742ac..2e43558c704b 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -133,15 +133,11 @@ extern int cio_is_console(struct subchannel_id); | |||
133 | extern struct subchannel *cio_get_console_subchannel(void); | 133 | extern struct subchannel *cio_get_console_subchannel(void); |
134 | extern spinlock_t * cio_get_console_lock(void); | 134 | extern spinlock_t * cio_get_console_lock(void); |
135 | extern void *cio_get_console_priv(void); | 135 | extern void *cio_get_console_priv(void); |
136 | extern const char *cio_get_console_sch_name(struct subchannel_id schid); | ||
137 | extern const char *cio_get_console_cdev_name(struct subchannel *sch); | ||
138 | #else | 136 | #else |
139 | #define cio_is_console(schid) 0 | 137 | #define cio_is_console(schid) 0 |
140 | #define cio_get_console_subchannel() NULL | 138 | #define cio_get_console_subchannel() NULL |
141 | #define cio_get_console_lock() NULL | 139 | #define cio_get_console_lock() NULL |
142 | #define cio_get_console_priv() NULL | 140 | #define cio_get_console_priv() NULL |
143 | #define cio_get_console_sch_name(schid) NULL | ||
144 | #define cio_get_console_cdev_name(sch) NULL | ||
145 | #endif | 141 | #endif |
146 | 142 | ||
147 | #endif | 143 | #endif |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 85d43c6bcb66..e995123fd805 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -152,24 +152,15 @@ css_alloc_subchannel(struct subchannel_id schid) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | static void | 154 | static void |
155 | css_free_subchannel(struct subchannel *sch) | ||
156 | { | ||
157 | if (sch) { | ||
158 | /* Reset intparm to zeroes. */ | ||
159 | sch->config.intparm = 0; | ||
160 | cio_commit_config(sch); | ||
161 | kfree(sch->lock); | ||
162 | kfree(sch); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | static void | ||
167 | css_subchannel_release(struct device *dev) | 155 | css_subchannel_release(struct device *dev) |
168 | { | 156 | { |
169 | struct subchannel *sch; | 157 | struct subchannel *sch; |
170 | 158 | ||
171 | sch = to_subchannel(dev); | 159 | sch = to_subchannel(dev); |
172 | if (!cio_is_console(sch->schid)) { | 160 | if (!cio_is_console(sch->schid)) { |
161 | /* Reset intparm to zeroes. */ | ||
162 | sch->config.intparm = 0; | ||
163 | cio_commit_config(sch); | ||
173 | kfree(sch->lock); | 164 | kfree(sch->lock); |
174 | kfree(sch); | 165 | kfree(sch); |
175 | } | 166 | } |
@@ -180,6 +171,8 @@ static int css_sch_device_register(struct subchannel *sch) | |||
180 | int ret; | 171 | int ret; |
181 | 172 | ||
182 | mutex_lock(&sch->reg_mutex); | 173 | mutex_lock(&sch->reg_mutex); |
174 | dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, | ||
175 | sch->schid.sch_no); | ||
183 | ret = device_register(&sch->dev); | 176 | ret = device_register(&sch->dev); |
184 | mutex_unlock(&sch->reg_mutex); | 177 | mutex_unlock(&sch->reg_mutex); |
185 | return ret; | 178 | return ret; |
@@ -327,7 +320,7 @@ int css_probe_device(struct subchannel_id schid) | |||
327 | return PTR_ERR(sch); | 320 | return PTR_ERR(sch); |
328 | ret = css_register_subchannel(sch); | 321 | ret = css_register_subchannel(sch); |
329 | if (ret) | 322 | if (ret) |
330 | css_free_subchannel(sch); | 323 | put_device(&sch->dev); |
331 | return ret; | 324 | return ret; |
332 | } | 325 | } |
333 | 326 | ||
@@ -644,7 +637,10 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) | |||
644 | * not working) so we do it now. This is true e.g. for the | 637 | * not working) so we do it now. This is true e.g. for the |
645 | * console subchannel. | 638 | * console subchannel. |
646 | */ | 639 | */ |
647 | css_register_subchannel(sch); | 640 | if (css_register_subchannel(sch)) { |
641 | if (!cio_is_console(schid)) | ||
642 | put_device(&sch->dev); | ||
643 | } | ||
648 | return 0; | 644 | return 0; |
649 | } | 645 | } |
650 | 646 | ||
@@ -661,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) | |||
661 | css->global_pgid.pgid_high.cpu_addr = 0; | 657 | css->global_pgid.pgid_high.cpu_addr = 0; |
662 | #endif | 658 | #endif |
663 | } | 659 | } |
664 | css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; | 660 | css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident; |
665 | css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; | 661 | css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine; |
666 | css->global_pgid.tod_high = tod_high; | 662 | css->global_pgid.tod_high = tod_high; |
667 | 663 | ||
668 | } | 664 | } |
@@ -920,8 +916,10 @@ init_channel_subsystem (void) | |||
920 | goto out_device; | 916 | goto out_device; |
921 | } | 917 | } |
922 | ret = device_register(&css->pseudo_subchannel->dev); | 918 | ret = device_register(&css->pseudo_subchannel->dev); |
923 | if (ret) | 919 | if (ret) { |
920 | put_device(&css->pseudo_subchannel->dev); | ||
924 | goto out_file; | 921 | goto out_file; |
922 | } | ||
925 | } | 923 | } |
926 | ret = register_reboot_notifier(&css_reboot_notifier); | 924 | ret = register_reboot_notifier(&css_reboot_notifier); |
927 | if (ret) | 925 | if (ret) |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 3c57c1a18bb8..0f95405c2c5e 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -307,8 +307,11 @@ int ccw_device_is_orphan(struct ccw_device *cdev) | |||
307 | 307 | ||
308 | static void ccw_device_unregister(struct ccw_device *cdev) | 308 | static void ccw_device_unregister(struct ccw_device *cdev) |
309 | { | 309 | { |
310 | if (test_and_clear_bit(1, &cdev->private->registered)) | 310 | if (test_and_clear_bit(1, &cdev->private->registered)) { |
311 | device_del(&cdev->dev); | 311 | device_del(&cdev->dev); |
312 | /* Release reference from device_initialize(). */ | ||
313 | put_device(&cdev->dev); | ||
314 | } | ||
312 | } | 315 | } |
313 | 316 | ||
314 | static void ccw_device_remove_orphan_cb(struct work_struct *work) | 317 | static void ccw_device_remove_orphan_cb(struct work_struct *work) |
@@ -319,7 +322,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work) | |||
319 | priv = container_of(work, struct ccw_device_private, kick_work); | 322 | priv = container_of(work, struct ccw_device_private, kick_work); |
320 | cdev = priv->cdev; | 323 | cdev = priv->cdev; |
321 | ccw_device_unregister(cdev); | 324 | ccw_device_unregister(cdev); |
322 | put_device(&cdev->dev); | ||
323 | /* Release cdev reference for workqueue processing. */ | 325 | /* Release cdev reference for workqueue processing. */ |
324 | put_device(&cdev->dev); | 326 | put_device(&cdev->dev); |
325 | } | 327 | } |
@@ -333,15 +335,15 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) | |||
333 | * Forced offline in disconnected state means | 335 | * Forced offline in disconnected state means |
334 | * 'throw away device'. | 336 | * 'throw away device'. |
335 | */ | 337 | */ |
336 | /* Get cdev reference for workqueue processing. */ | ||
337 | if (!get_device(&cdev->dev)) | ||
338 | return; | ||
339 | if (ccw_device_is_orphan(cdev)) { | 338 | if (ccw_device_is_orphan(cdev)) { |
340 | /* | 339 | /* |
341 | * Deregister ccw device. | 340 | * Deregister ccw device. |
342 | * Unfortunately, we cannot do this directly from the | 341 | * Unfortunately, we cannot do this directly from the |
343 | * attribute method. | 342 | * attribute method. |
344 | */ | 343 | */ |
344 | /* Get cdev reference for workqueue processing. */ | ||
345 | if (!get_device(&cdev->dev)) | ||
346 | return; | ||
345 | spin_lock_irqsave(cdev->ccwlock, flags); | 347 | spin_lock_irqsave(cdev->ccwlock, flags); |
346 | cdev->private->state = DEV_STATE_NOT_OPER; | 348 | cdev->private->state = DEV_STATE_NOT_OPER; |
347 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 349 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
@@ -380,30 +382,34 @@ int ccw_device_set_offline(struct ccw_device *cdev) | |||
380 | } | 382 | } |
381 | cdev->online = 0; | 383 | cdev->online = 0; |
382 | spin_lock_irq(cdev->ccwlock); | 384 | spin_lock_irq(cdev->ccwlock); |
383 | ret = ccw_device_offline(cdev); | 385 | /* Wait until a final state or DISCONNECTED is reached */ |
384 | if (ret == -ENODEV) { | 386 | while (!dev_fsm_final_state(cdev) && |
385 | if (cdev->private->state != DEV_STATE_NOT_OPER) { | 387 | cdev->private->state != DEV_STATE_DISCONNECTED) { |
386 | cdev->private->state = DEV_STATE_OFFLINE; | ||
387 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | ||
388 | } | ||
389 | spin_unlock_irq(cdev->ccwlock); | 388 | spin_unlock_irq(cdev->ccwlock); |
390 | /* Give up reference from ccw_device_set_online(). */ | 389 | wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || |
391 | put_device(&cdev->dev); | 390 | cdev->private->state == DEV_STATE_DISCONNECTED)); |
392 | return ret; | 391 | spin_lock_irq(cdev->ccwlock); |
393 | } | 392 | } |
393 | ret = ccw_device_offline(cdev); | ||
394 | if (ret) | ||
395 | goto error; | ||
394 | spin_unlock_irq(cdev->ccwlock); | 396 | spin_unlock_irq(cdev->ccwlock); |
395 | if (ret == 0) { | 397 | wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || |
396 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | 398 | cdev->private->state == DEV_STATE_DISCONNECTED)); |
397 | /* Give up reference from ccw_device_set_online(). */ | 399 | /* Give up reference from ccw_device_set_online(). */ |
398 | put_device(&cdev->dev); | 400 | put_device(&cdev->dev); |
399 | } else { | 401 | return 0; |
400 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " | 402 | |
401 | "device 0.%x.%04x\n", | 403 | error: |
402 | ret, cdev->private->dev_id.ssid, | 404 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n", |
403 | cdev->private->dev_id.devno); | 405 | ret, cdev->private->dev_id.ssid, |
404 | cdev->online = 1; | 406 | cdev->private->dev_id.devno); |
405 | } | 407 | cdev->private->state = DEV_STATE_OFFLINE; |
406 | return ret; | 408 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
409 | spin_unlock_irq(cdev->ccwlock); | ||
410 | /* Give up reference from ccw_device_set_online(). */ | ||
411 | put_device(&cdev->dev); | ||
412 | return -ENODEV; | ||
407 | } | 413 | } |
408 | 414 | ||
409 | /** | 415 | /** |
@@ -421,6 +427,7 @@ int ccw_device_set_offline(struct ccw_device *cdev) | |||
421 | int ccw_device_set_online(struct ccw_device *cdev) | 427 | int ccw_device_set_online(struct ccw_device *cdev) |
422 | { | 428 | { |
423 | int ret; | 429 | int ret; |
430 | int ret2; | ||
424 | 431 | ||
425 | if (!cdev) | 432 | if (!cdev) |
426 | return -ENODEV; | 433 | return -ENODEV; |
@@ -444,28 +451,53 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
444 | put_device(&cdev->dev); | 451 | put_device(&cdev->dev); |
445 | return ret; | 452 | return ret; |
446 | } | 453 | } |
447 | if (cdev->private->state != DEV_STATE_ONLINE) { | 454 | spin_lock_irq(cdev->ccwlock); |
455 | /* Check if online processing was successful */ | ||
456 | if ((cdev->private->state != DEV_STATE_ONLINE) && | ||
457 | (cdev->private->state != DEV_STATE_W4SENSE)) { | ||
458 | spin_unlock_irq(cdev->ccwlock); | ||
448 | /* Give up online reference since onlining failed. */ | 459 | /* Give up online reference since onlining failed. */ |
449 | put_device(&cdev->dev); | 460 | put_device(&cdev->dev); |
450 | return -ENODEV; | 461 | return -ENODEV; |
451 | } | 462 | } |
452 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { | 463 | spin_unlock_irq(cdev->ccwlock); |
453 | cdev->online = 1; | 464 | if (cdev->drv->set_online) |
454 | return 0; | 465 | ret = cdev->drv->set_online(cdev); |
455 | } | 466 | if (ret) |
467 | goto rollback; | ||
468 | cdev->online = 1; | ||
469 | return 0; | ||
470 | |||
471 | rollback: | ||
456 | spin_lock_irq(cdev->ccwlock); | 472 | spin_lock_irq(cdev->ccwlock); |
457 | ret = ccw_device_offline(cdev); | 473 | /* Wait until a final state or DISCONNECTED is reached */ |
474 | while (!dev_fsm_final_state(cdev) && | ||
475 | cdev->private->state != DEV_STATE_DISCONNECTED) { | ||
476 | spin_unlock_irq(cdev->ccwlock); | ||
477 | wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || | ||
478 | cdev->private->state == DEV_STATE_DISCONNECTED)); | ||
479 | spin_lock_irq(cdev->ccwlock); | ||
480 | } | ||
481 | ret2 = ccw_device_offline(cdev); | ||
482 | if (ret2) | ||
483 | goto error; | ||
484 | spin_unlock_irq(cdev->ccwlock); | ||
485 | wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || | ||
486 | cdev->private->state == DEV_STATE_DISCONNECTED)); | ||
487 | /* Give up online reference since onlining failed. */ | ||
488 | put_device(&cdev->dev); | ||
489 | return ret; | ||
490 | |||
491 | error: | ||
492 | CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, " | ||
493 | "device 0.%x.%04x\n", | ||
494 | ret2, cdev->private->dev_id.ssid, | ||
495 | cdev->private->dev_id.devno); | ||
496 | cdev->private->state = DEV_STATE_OFFLINE; | ||
458 | spin_unlock_irq(cdev->ccwlock); | 497 | spin_unlock_irq(cdev->ccwlock); |
459 | if (ret == 0) | ||
460 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | ||
461 | else | ||
462 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " | ||
463 | "device 0.%x.%04x\n", | ||
464 | ret, cdev->private->dev_id.ssid, | ||
465 | cdev->private->dev_id.devno); | ||
466 | /* Give up online reference since onlining failed. */ | 498 | /* Give up online reference since onlining failed. */ |
467 | put_device(&cdev->dev); | 499 | put_device(&cdev->dev); |
468 | return (ret == 0) ? -ENODEV : ret; | 500 | return ret; |
469 | } | 501 | } |
470 | 502 | ||
471 | static int online_store_handle_offline(struct ccw_device *cdev) | 503 | static int online_store_handle_offline(struct ccw_device *cdev) |
@@ -637,8 +669,12 @@ static int ccw_device_register(struct ccw_device *cdev) | |||
637 | int ret; | 669 | int ret; |
638 | 670 | ||
639 | dev->bus = &ccw_bus_type; | 671 | dev->bus = &ccw_bus_type; |
640 | 672 | ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid, | |
641 | if ((ret = device_add(dev))) | 673 | cdev->private->dev_id.devno); |
674 | if (ret) | ||
675 | return ret; | ||
676 | ret = device_add(dev); | ||
677 | if (ret) | ||
642 | return ret; | 678 | return ret; |
643 | 679 | ||
644 | set_bit(1, &cdev->private->registered); | 680 | set_bit(1, &cdev->private->registered); |
@@ -772,10 +808,8 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) | |||
772 | cdev = io_subchannel_allocate_dev(sch); | 808 | cdev = io_subchannel_allocate_dev(sch); |
773 | if (!IS_ERR(cdev)) { | 809 | if (!IS_ERR(cdev)) { |
774 | ret = io_subchannel_initialize_dev(sch, cdev); | 810 | ret = io_subchannel_initialize_dev(sch, cdev); |
775 | if (ret) { | 811 | if (ret) |
776 | kfree(cdev); | ||
777 | cdev = ERR_PTR(ret); | 812 | cdev = ERR_PTR(ret); |
778 | } | ||
779 | } | 813 | } |
780 | return cdev; | 814 | return cdev; |
781 | } | 815 | } |
@@ -1026,9 +1060,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) | |||
1026 | return; | 1060 | return; |
1027 | sch = to_subchannel(cdev->dev.parent); | 1061 | sch = to_subchannel(cdev->dev.parent); |
1028 | css_sch_device_unregister(sch); | 1062 | css_sch_device_unregister(sch); |
1029 | /* Reset intparm to zeroes. */ | ||
1030 | sch->config.intparm = 0; | ||
1031 | cio_commit_config(sch); | ||
1032 | /* Release cdev reference for workqueue processing.*/ | 1063 | /* Release cdev reference for workqueue processing.*/ |
1033 | put_device(&cdev->dev); | 1064 | put_device(&cdev->dev); |
1034 | /* Release subchannel reference for local processing. */ | 1065 | /* Release subchannel reference for local processing. */ |
@@ -1037,6 +1068,9 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) | |||
1037 | 1068 | ||
1038 | void ccw_device_schedule_sch_unregister(struct ccw_device *cdev) | 1069 | void ccw_device_schedule_sch_unregister(struct ccw_device *cdev) |
1039 | { | 1070 | { |
1071 | /* Get cdev reference for workqueue processing. */ | ||
1072 | if (!get_device(&cdev->dev)) | ||
1073 | return; | ||
1040 | PREPARE_WORK(&cdev->private->kick_work, | 1074 | PREPARE_WORK(&cdev->private->kick_work, |
1041 | ccw_device_call_sch_unregister); | 1075 | ccw_device_call_sch_unregister); |
1042 | queue_work(slow_path_wq, &cdev->private->kick_work); | 1076 | queue_work(slow_path_wq, &cdev->private->kick_work); |
@@ -1057,9 +1091,6 @@ io_subchannel_recog_done(struct ccw_device *cdev) | |||
1057 | /* Device did not respond in time. */ | 1091 | /* Device did not respond in time. */ |
1058 | case DEV_STATE_NOT_OPER: | 1092 | case DEV_STATE_NOT_OPER: |
1059 | cdev->private->flags.recog_done = 1; | 1093 | cdev->private->flags.recog_done = 1; |
1060 | /* Remove device found not operational. */ | ||
1061 | if (!get_device(&cdev->dev)) | ||
1062 | break; | ||
1063 | ccw_device_schedule_sch_unregister(cdev); | 1094 | ccw_device_schedule_sch_unregister(cdev); |
1064 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1095 | if (atomic_dec_and_test(&ccw_device_init_count)) |
1065 | wake_up(&ccw_device_init_wq); | 1096 | wake_up(&ccw_device_init_wq); |
@@ -1097,13 +1128,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) | |||
1097 | init_waitqueue_head(&priv->wait_q); | 1128 | init_waitqueue_head(&priv->wait_q); |
1098 | init_timer(&priv->timer); | 1129 | init_timer(&priv->timer); |
1099 | 1130 | ||
1100 | /* Set an initial name for the device. */ | ||
1101 | if (cio_is_console(sch->schid)) | ||
1102 | cdev->dev.init_name = cio_get_console_cdev_name(sch); | ||
1103 | else | ||
1104 | dev_set_name(&cdev->dev, "0.%x.%04x", | ||
1105 | sch->schid.ssid, sch->schib.pmcw.dev); | ||
1106 | |||
1107 | /* Increase counter of devices currently in recognition. */ | 1131 | /* Increase counter of devices currently in recognition. */ |
1108 | atomic_inc(&ccw_device_init_count); | 1132 | atomic_inc(&ccw_device_init_count); |
1109 | 1133 | ||
@@ -1173,8 +1197,8 @@ static void io_subchannel_irq(struct subchannel *sch) | |||
1173 | 1197 | ||
1174 | cdev = sch_get_cdev(sch); | 1198 | cdev = sch_get_cdev(sch); |
1175 | 1199 | ||
1176 | CIO_TRACE_EVENT(3, "IRQ"); | 1200 | CIO_TRACE_EVENT(6, "IRQ"); |
1177 | CIO_TRACE_EVENT(3, dev_name(&sch->dev)); | 1201 | CIO_TRACE_EVENT(6, dev_name(&sch->dev)); |
1178 | if (cdev) | 1202 | if (cdev) |
1179 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | 1203 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
1180 | } | 1204 | } |
@@ -1212,9 +1236,6 @@ static void io_subchannel_do_unreg(struct work_struct *work) | |||
1212 | 1236 | ||
1213 | sch = container_of(work, struct subchannel, work); | 1237 | sch = container_of(work, struct subchannel, work); |
1214 | css_sch_device_unregister(sch); | 1238 | css_sch_device_unregister(sch); |
1215 | /* Reset intparm to zeroes. */ | ||
1216 | sch->config.intparm = 0; | ||
1217 | cio_commit_config(sch); | ||
1218 | put_device(&sch->dev); | 1239 | put_device(&sch->dev); |
1219 | } | 1240 | } |
1220 | 1241 | ||
@@ -1336,7 +1357,6 @@ io_subchannel_remove (struct subchannel *sch) | |||
1336 | cdev->private->state = DEV_STATE_NOT_OPER; | 1357 | cdev->private->state = DEV_STATE_NOT_OPER; |
1337 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 1358 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
1338 | ccw_device_unregister(cdev); | 1359 | ccw_device_unregister(cdev); |
1339 | put_device(&cdev->dev); | ||
1340 | kfree(sch->private); | 1360 | kfree(sch->private); |
1341 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | 1361 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); |
1342 | return 0; | 1362 | return 0; |
@@ -1573,8 +1593,6 @@ static int purge_fn(struct device *dev, void *data) | |||
1573 | spin_unlock_irq(cdev->ccwlock); | 1593 | spin_unlock_irq(cdev->ccwlock); |
1574 | if (!unreg) | 1594 | if (!unreg) |
1575 | goto out; | 1595 | goto out; |
1576 | if (!get_device(&cdev->dev)) | ||
1577 | goto out; | ||
1578 | CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid, | 1596 | CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid, |
1579 | priv->dev_id.devno); | 1597 | priv->dev_id.devno); |
1580 | ccw_device_schedule_sch_unregister(cdev); | 1598 | ccw_device_schedule_sch_unregister(cdev); |
@@ -1690,10 +1708,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1690 | spin_unlock_irqrestore(sch->lock, flags); | 1708 | spin_unlock_irqrestore(sch->lock, flags); |
1691 | css_sch_device_unregister(sch); | 1709 | css_sch_device_unregister(sch); |
1692 | spin_lock_irqsave(sch->lock, flags); | 1710 | spin_lock_irqsave(sch->lock, flags); |
1693 | |||
1694 | /* Reset intparm to zeroes. */ | ||
1695 | sch->config.intparm = 0; | ||
1696 | cio_commit_config(sch); | ||
1697 | break; | 1711 | break; |
1698 | case REPROBE: | 1712 | case REPROBE: |
1699 | ccw_device_trigger_reprobe(cdev); | 1713 | ccw_device_trigger_reprobe(cdev); |
@@ -1714,7 +1728,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1714 | 1728 | ||
1715 | #ifdef CONFIG_CCW_CONSOLE | 1729 | #ifdef CONFIG_CCW_CONSOLE |
1716 | static struct ccw_device console_cdev; | 1730 | static struct ccw_device console_cdev; |
1717 | static char console_cdev_name[10] = "0.x.xxxx"; | ||
1718 | static struct ccw_device_private console_private; | 1731 | static struct ccw_device_private console_private; |
1719 | static int console_cdev_in_use; | 1732 | static int console_cdev_in_use; |
1720 | 1733 | ||
@@ -1798,13 +1811,6 @@ int ccw_device_force_console(void) | |||
1798 | return ccw_device_pm_restore(&console_cdev.dev); | 1811 | return ccw_device_pm_restore(&console_cdev.dev); |
1799 | } | 1812 | } |
1800 | EXPORT_SYMBOL_GPL(ccw_device_force_console); | 1813 | EXPORT_SYMBOL_GPL(ccw_device_force_console); |
1801 | |||
1802 | const char *cio_get_console_cdev_name(struct subchannel *sch) | ||
1803 | { | ||
1804 | snprintf(console_cdev_name, 10, "0.%x.%04x", | ||
1805 | sch->schid.ssid, sch->schib.pmcw.dev); | ||
1806 | return (const char *)console_cdev_name; | ||
1807 | } | ||
1808 | #endif | 1814 | #endif |
1809 | 1815 | ||
1810 | /* | 1816 | /* |
@@ -2022,7 +2028,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) | |||
2022 | spin_unlock_irq(sch->lock); | 2028 | spin_unlock_irq(sch->lock); |
2023 | if (ret) { | 2029 | if (ret) { |
2024 | CIO_MSG_EVENT(0, "Couldn't start recognition for device " | 2030 | CIO_MSG_EVENT(0, "Couldn't start recognition for device " |
2025 | "%s (ret=%d)\n", dev_name(&cdev->dev), ret); | 2031 | "0.%x.%04x (ret=%d)\n", |
2032 | cdev->private->dev_id.ssid, | ||
2033 | cdev->private->dev_id.devno, ret); | ||
2026 | spin_lock_irq(sch->lock); | 2034 | spin_lock_irq(sch->lock); |
2027 | cdev->private->state = DEV_STATE_DISCONNECTED; | 2035 | cdev->private->state = DEV_STATE_DISCONNECTED; |
2028 | spin_unlock_irq(sch->lock); | 2036 | spin_unlock_irq(sch->lock); |
@@ -2085,8 +2093,9 @@ static int ccw_device_pm_restore(struct device *dev) | |||
2085 | } | 2093 | } |
2086 | /* check if the device id has changed */ | 2094 | /* check if the device id has changed */ |
2087 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 2095 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
2088 | CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from " | 2096 | CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno " |
2089 | "%04x to %04x)\n", dev_name(&sch->dev), | 2097 | "changed from %04x to %04x)\n", |
2098 | sch->schid.ssid, sch->schid.sch_no, | ||
2090 | cdev->private->dev_id.devno, | 2099 | cdev->private->dev_id.devno, |
2091 | sch->schib.pmcw.dev); | 2100 | sch->schib.pmcw.dev); |
2092 | goto out_unreg_unlock; | 2101 | goto out_unreg_unlock; |
@@ -2119,8 +2128,9 @@ static int ccw_device_pm_restore(struct device *dev) | |||
2119 | if (cm_enabled) { | 2128 | if (cm_enabled) { |
2120 | ret = ccw_set_cmf(cdev, 1); | 2129 | ret = ccw_set_cmf(cdev, 1); |
2121 | if (ret) { | 2130 | if (ret) { |
2122 | CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed " | 2131 | CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " |
2123 | "(rc=%d)\n", dev_name(&cdev->dev), ret); | 2132 | "(rc=%d)\n", cdev->private->dev_id.ssid, |
2133 | cdev->private->dev_id.devno, ret); | ||
2124 | ret = 0; | 2134 | ret = 0; |
2125 | } | 2135 | } |
2126 | } | 2136 | } |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 3db88c52d287..e728ce447f6e 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -394,6 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
394 | ccw_device_schedule_sch_unregister(cdev); | 394 | ccw_device_schedule_sch_unregister(cdev); |
395 | cdev->private->flags.donotify = 0; | 395 | cdev->private->flags.donotify = 0; |
396 | } | 396 | } |
397 | if (state == DEV_STATE_NOT_OPER) { | ||
398 | CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", | ||
399 | cdev->private->dev_id.devno, sch->schid.sch_no); | ||
400 | if (!ccw_device_notify(cdev, CIO_GONE)) | ||
401 | ccw_device_schedule_sch_unregister(cdev); | ||
402 | cdev->private->flags.donotify = 0; | ||
403 | } | ||
397 | 404 | ||
398 | if (cdev->private->flags.donotify) { | 405 | if (cdev->private->flags.donotify) { |
399 | cdev->private->flags.donotify = 0; | 406 | cdev->private->flags.donotify = 0; |
@@ -731,6 +738,17 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev, | |||
731 | } | 738 | } |
732 | 739 | ||
733 | /* | 740 | /* |
741 | * Handle path verification event in offline state. | ||
742 | */ | ||
743 | static void ccw_device_offline_verify(struct ccw_device *cdev, | ||
744 | enum dev_event dev_event) | ||
745 | { | ||
746 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
747 | |||
748 | css_schedule_eval(sch->schid); | ||
749 | } | ||
750 | |||
751 | /* | ||
734 | * Handle path verification event. | 752 | * Handle path verification event. |
735 | */ | 753 | */ |
736 | static void | 754 | static void |
@@ -887,6 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
887 | } | 905 | } |
888 | call_handler: | 906 | call_handler: |
889 | cdev->private->state = DEV_STATE_ONLINE; | 907 | cdev->private->state = DEV_STATE_ONLINE; |
908 | /* In case sensing interfered with setting the device online */ | ||
909 | wake_up(&cdev->private->wait_q); | ||
890 | /* Call the handler. */ | 910 | /* Call the handler. */ |
891 | if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) | 911 | if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) |
892 | /* Start delayed path verification. */ | 912 | /* Start delayed path verification. */ |
@@ -1149,7 +1169,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { | |||
1149 | [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, | 1169 | [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, |
1150 | [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, | 1170 | [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, |
1151 | [DEV_EVENT_TIMEOUT] = ccw_device_nop, | 1171 | [DEV_EVENT_TIMEOUT] = ccw_device_nop, |
1152 | [DEV_EVENT_VERIFY] = ccw_device_nop, | 1172 | [DEV_EVENT_VERIFY] = ccw_device_offline_verify, |
1153 | }, | 1173 | }, |
1154 | [DEV_STATE_VERIFY] = { | 1174 | [DEV_STATE_VERIFY] = { |
1155 | [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, | 1175 | [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, |
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index b1241f8fae88..ff7748a9199d 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/s390/cio/qdio.h | 2 | * linux/drivers/s390/cio/qdio.h |
3 | * | 3 | * |
4 | * Copyright 2000,2008 IBM Corp. | 4 | * Copyright 2000,2009 IBM Corp. |
5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com> | 5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com> |
6 | * Jan Glauber <jang@linux.vnet.ibm.com> | 6 | * Jan Glauber <jang@linux.vnet.ibm.com> |
7 | */ | 7 | */ |
@@ -246,6 +246,7 @@ struct qdio_q { | |||
246 | atomic_t nr_buf_used; | 246 | atomic_t nr_buf_used; |
247 | 247 | ||
248 | struct qdio_irq *irq_ptr; | 248 | struct qdio_irq *irq_ptr; |
249 | struct dentry *debugfs_q; | ||
249 | struct tasklet_struct tasklet; | 250 | struct tasklet_struct tasklet; |
250 | 251 | ||
251 | /* error condition during a data transfer */ | 252 | /* error condition during a data transfer */ |
@@ -267,6 +268,7 @@ struct qdio_irq { | |||
267 | struct qib qib; | 268 | struct qib qib; |
268 | u32 *dsci; /* address of device state change indicator */ | 269 | u32 *dsci; /* address of device state change indicator */ |
269 | struct ccw_device *cdev; | 270 | struct ccw_device *cdev; |
271 | struct dentry *debugfs_dev; | ||
270 | 272 | ||
271 | unsigned long int_parm; | 273 | unsigned long int_parm; |
272 | struct subchannel_id schid; | 274 | struct subchannel_id schid; |
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index b8626d4df116..1b78f639ead3 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -1,14 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/qdio_debug.c | 2 | * drivers/s390/cio/qdio_debug.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008 | 4 | * Copyright IBM Corp. 2008,2009 |
5 | * | 5 | * |
6 | * Author: Jan Glauber (jang@linux.vnet.ibm.com) | 6 | * Author: Jan Glauber (jang@linux.vnet.ibm.com) |
7 | */ | 7 | */ |
8 | #include <linux/proc_fs.h> | ||
9 | #include <linux/seq_file.h> | 8 | #include <linux/seq_file.h> |
10 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
11 | #include <asm/qdio.h> | ||
12 | #include <asm/debug.h> | 10 | #include <asm/debug.h> |
13 | #include "qdio_debug.h" | 11 | #include "qdio_debug.h" |
14 | #include "qdio.h" | 12 | #include "qdio.h" |
@@ -17,10 +15,7 @@ debug_info_t *qdio_dbf_setup; | |||
17 | debug_info_t *qdio_dbf_error; | 15 | debug_info_t *qdio_dbf_error; |
18 | 16 | ||
19 | static struct dentry *debugfs_root; | 17 | static struct dentry *debugfs_root; |
20 | #define MAX_DEBUGFS_QUEUES 32 | 18 | #define QDIO_DEBUGFS_NAME_LEN 10 |
21 | static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; | ||
22 | static DEFINE_MUTEX(debugfs_mutex); | ||
23 | #define QDIO_DEBUGFS_NAME_LEN 40 | ||
24 | 19 | ||
25 | void qdio_allocate_dbf(struct qdio_initialize *init_data, | 20 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
26 | struct qdio_irq *irq_ptr) | 21 | struct qdio_irq *irq_ptr) |
@@ -130,20 +125,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp) | |||
130 | filp->f_path.dentry->d_inode->i_private); | 125 | filp->f_path.dentry->d_inode->i_private); |
131 | } | 126 | } |
132 | 127 | ||
133 | static void remove_debugfs_entry(struct qdio_q *q) | ||
134 | { | ||
135 | int i; | ||
136 | |||
137 | for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) { | ||
138 | if (!debugfs_queues[i]) | ||
139 | continue; | ||
140 | if (debugfs_queues[i]->d_inode->i_private == q) { | ||
141 | debugfs_remove(debugfs_queues[i]); | ||
142 | debugfs_queues[i] = NULL; | ||
143 | } | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static struct file_operations debugfs_fops = { | 128 | static struct file_operations debugfs_fops = { |
148 | .owner = THIS_MODULE, | 129 | .owner = THIS_MODULE, |
149 | .open = qstat_seq_open, | 130 | .open = qstat_seq_open, |
@@ -155,22 +136,15 @@ static struct file_operations debugfs_fops = { | |||
155 | 136 | ||
156 | static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) | 137 | static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) |
157 | { | 138 | { |
158 | int i = 0; | ||
159 | char name[QDIO_DEBUGFS_NAME_LEN]; | 139 | char name[QDIO_DEBUGFS_NAME_LEN]; |
160 | 140 | ||
161 | while (debugfs_queues[i] != NULL) { | 141 | snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d", |
162 | i++; | ||
163 | if (i >= MAX_DEBUGFS_QUEUES) | ||
164 | return; | ||
165 | } | ||
166 | snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d", | ||
167 | dev_name(&cdev->dev), | ||
168 | q->is_input_q ? "input" : "output", | 142 | q->is_input_q ? "input" : "output", |
169 | q->nr); | 143 | q->nr); |
170 | debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, | 144 | q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, |
171 | debugfs_root, q, &debugfs_fops); | 145 | q->irq_ptr->debugfs_dev, q, &debugfs_fops); |
172 | if (IS_ERR(debugfs_queues[i])) | 146 | if (IS_ERR(q->debugfs_q)) |
173 | debugfs_queues[i] = NULL; | 147 | q->debugfs_q = NULL; |
174 | } | 148 | } |
175 | 149 | ||
176 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) | 150 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) |
@@ -178,12 +152,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) | |||
178 | struct qdio_q *q; | 152 | struct qdio_q *q; |
179 | int i; | 153 | int i; |
180 | 154 | ||
181 | mutex_lock(&debugfs_mutex); | 155 | irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev), |
156 | debugfs_root); | ||
157 | if (IS_ERR(irq_ptr->debugfs_dev)) | ||
158 | irq_ptr->debugfs_dev = NULL; | ||
182 | for_each_input_queue(irq_ptr, q, i) | 159 | for_each_input_queue(irq_ptr, q, i) |
183 | setup_debugfs_entry(q, cdev); | 160 | setup_debugfs_entry(q, cdev); |
184 | for_each_output_queue(irq_ptr, q, i) | 161 | for_each_output_queue(irq_ptr, q, i) |
185 | setup_debugfs_entry(q, cdev); | 162 | setup_debugfs_entry(q, cdev); |
186 | mutex_unlock(&debugfs_mutex); | ||
187 | } | 163 | } |
188 | 164 | ||
189 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) | 165 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) |
@@ -191,17 +167,16 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd | |||
191 | struct qdio_q *q; | 167 | struct qdio_q *q; |
192 | int i; | 168 | int i; |
193 | 169 | ||
194 | mutex_lock(&debugfs_mutex); | ||
195 | for_each_input_queue(irq_ptr, q, i) | 170 | for_each_input_queue(irq_ptr, q, i) |
196 | remove_debugfs_entry(q); | 171 | debugfs_remove(q->debugfs_q); |
197 | for_each_output_queue(irq_ptr, q, i) | 172 | for_each_output_queue(irq_ptr, q, i) |
198 | remove_debugfs_entry(q); | 173 | debugfs_remove(q->debugfs_q); |
199 | mutex_unlock(&debugfs_mutex); | 174 | debugfs_remove(irq_ptr->debugfs_dev); |
200 | } | 175 | } |
201 | 176 | ||
202 | int __init qdio_debug_init(void) | 177 | int __init qdio_debug_init(void) |
203 | { | 178 | { |
204 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); | 179 | debugfs_root = debugfs_create_dir("qdio", NULL); |
205 | 180 | ||
206 | qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); | 181 | qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); |
207 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | 182 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 0038750ad945..9aef402a5f1b 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -798,8 +798,10 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
798 | 798 | ||
799 | if (!qdio_inbound_q_done(q)) { | 799 | if (!qdio_inbound_q_done(q)) { |
800 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); | 800 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); |
801 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 801 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { |
802 | tasklet_schedule(&q->tasklet); | 802 | tasklet_schedule(&q->tasklet); |
803 | return; | ||
804 | } | ||
803 | } | 805 | } |
804 | 806 | ||
805 | qdio_stop_polling(q); | 807 | qdio_stop_polling(q); |
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c deleted file mode 100644 index f8da25ab576d..000000000000 --- a/drivers/s390/cio/scsw.c +++ /dev/null | |||
@@ -1,843 +0,0 @@ | |||
1 | /* | ||
2 | * Helper functions for scsw access. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <asm/cio.h> | ||
11 | #include "css.h" | ||
12 | #include "chsc.h" | ||
13 | |||
14 | /** | ||
15 | * scsw_is_tm - check for transport mode scsw | ||
16 | * @scsw: pointer to scsw | ||
17 | * | ||
18 | * Return non-zero if the specified scsw is a transport mode scsw, zero | ||
19 | * otherwise. | ||
20 | */ | ||
21 | int scsw_is_tm(union scsw *scsw) | ||
22 | { | ||
23 | return css_general_characteristics.fcx && (scsw->tm.x == 1); | ||
24 | } | ||
25 | EXPORT_SYMBOL(scsw_is_tm); | ||
26 | |||
27 | /** | ||
28 | * scsw_key - return scsw key field | ||
29 | * @scsw: pointer to scsw | ||
30 | * | ||
31 | * Return the value of the key field of the specified scsw, regardless of | ||
32 | * whether it is a transport mode or command mode scsw. | ||
33 | */ | ||
34 | u32 scsw_key(union scsw *scsw) | ||
35 | { | ||
36 | if (scsw_is_tm(scsw)) | ||
37 | return scsw->tm.key; | ||
38 | else | ||
39 | return scsw->cmd.key; | ||
40 | } | ||
41 | EXPORT_SYMBOL(scsw_key); | ||
42 | |||
43 | /** | ||
44 | * scsw_eswf - return scsw eswf field | ||
45 | * @scsw: pointer to scsw | ||
46 | * | ||
47 | * Return the value of the eswf field of the specified scsw, regardless of | ||
48 | * whether it is a transport mode or command mode scsw. | ||
49 | */ | ||
50 | u32 scsw_eswf(union scsw *scsw) | ||
51 | { | ||
52 | if (scsw_is_tm(scsw)) | ||
53 | return scsw->tm.eswf; | ||
54 | else | ||
55 | return scsw->cmd.eswf; | ||
56 | } | ||
57 | EXPORT_SYMBOL(scsw_eswf); | ||
58 | |||
59 | /** | ||
60 | * scsw_cc - return scsw cc field | ||
61 | * @scsw: pointer to scsw | ||
62 | * | ||
63 | * Return the value of the cc field of the specified scsw, regardless of | ||
64 | * whether it is a transport mode or command mode scsw. | ||
65 | */ | ||
66 | u32 scsw_cc(union scsw *scsw) | ||
67 | { | ||
68 | if (scsw_is_tm(scsw)) | ||
69 | return scsw->tm.cc; | ||
70 | else | ||
71 | return scsw->cmd.cc; | ||
72 | } | ||
73 | EXPORT_SYMBOL(scsw_cc); | ||
74 | |||
75 | /** | ||
76 | * scsw_ectl - return scsw ectl field | ||
77 | * @scsw: pointer to scsw | ||
78 | * | ||
79 | * Return the value of the ectl field of the specified scsw, regardless of | ||
80 | * whether it is a transport mode or command mode scsw. | ||
81 | */ | ||
82 | u32 scsw_ectl(union scsw *scsw) | ||
83 | { | ||
84 | if (scsw_is_tm(scsw)) | ||
85 | return scsw->tm.ectl; | ||
86 | else | ||
87 | return scsw->cmd.ectl; | ||
88 | } | ||
89 | EXPORT_SYMBOL(scsw_ectl); | ||
90 | |||
91 | /** | ||
92 | * scsw_pno - return scsw pno field | ||
93 | * @scsw: pointer to scsw | ||
94 | * | ||
95 | * Return the value of the pno field of the specified scsw, regardless of | ||
96 | * whether it is a transport mode or command mode scsw. | ||
97 | */ | ||
98 | u32 scsw_pno(union scsw *scsw) | ||
99 | { | ||
100 | if (scsw_is_tm(scsw)) | ||
101 | return scsw->tm.pno; | ||
102 | else | ||
103 | return scsw->cmd.pno; | ||
104 | } | ||
105 | EXPORT_SYMBOL(scsw_pno); | ||
106 | |||
107 | /** | ||
108 | * scsw_fctl - return scsw fctl field | ||
109 | * @scsw: pointer to scsw | ||
110 | * | ||
111 | * Return the value of the fctl field of the specified scsw, regardless of | ||
112 | * whether it is a transport mode or command mode scsw. | ||
113 | */ | ||
114 | u32 scsw_fctl(union scsw *scsw) | ||
115 | { | ||
116 | if (scsw_is_tm(scsw)) | ||
117 | return scsw->tm.fctl; | ||
118 | else | ||
119 | return scsw->cmd.fctl; | ||
120 | } | ||
121 | EXPORT_SYMBOL(scsw_fctl); | ||
122 | |||
123 | /** | ||
124 | * scsw_actl - return scsw actl field | ||
125 | * @scsw: pointer to scsw | ||
126 | * | ||
127 | * Return the value of the actl field of the specified scsw, regardless of | ||
128 | * whether it is a transport mode or command mode scsw. | ||
129 | */ | ||
130 | u32 scsw_actl(union scsw *scsw) | ||
131 | { | ||
132 | if (scsw_is_tm(scsw)) | ||
133 | return scsw->tm.actl; | ||
134 | else | ||
135 | return scsw->cmd.actl; | ||
136 | } | ||
137 | EXPORT_SYMBOL(scsw_actl); | ||
138 | |||
139 | /** | ||
140 | * scsw_stctl - return scsw stctl field | ||
141 | * @scsw: pointer to scsw | ||
142 | * | ||
143 | * Return the value of the stctl field of the specified scsw, regardless of | ||
144 | * whether it is a transport mode or command mode scsw. | ||
145 | */ | ||
146 | u32 scsw_stctl(union scsw *scsw) | ||
147 | { | ||
148 | if (scsw_is_tm(scsw)) | ||
149 | return scsw->tm.stctl; | ||
150 | else | ||
151 | return scsw->cmd.stctl; | ||
152 | } | ||
153 | EXPORT_SYMBOL(scsw_stctl); | ||
154 | |||
155 | /** | ||
156 | * scsw_dstat - return scsw dstat field | ||
157 | * @scsw: pointer to scsw | ||
158 | * | ||
159 | * Return the value of the dstat field of the specified scsw, regardless of | ||
160 | * whether it is a transport mode or command mode scsw. | ||
161 | */ | ||
162 | u32 scsw_dstat(union scsw *scsw) | ||
163 | { | ||
164 | if (scsw_is_tm(scsw)) | ||
165 | return scsw->tm.dstat; | ||
166 | else | ||
167 | return scsw->cmd.dstat; | ||
168 | } | ||
169 | EXPORT_SYMBOL(scsw_dstat); | ||
170 | |||
171 | /** | ||
172 | * scsw_cstat - return scsw cstat field | ||
173 | * @scsw: pointer to scsw | ||
174 | * | ||
175 | * Return the value of the cstat field of the specified scsw, regardless of | ||
176 | * whether it is a transport mode or command mode scsw. | ||
177 | */ | ||
178 | u32 scsw_cstat(union scsw *scsw) | ||
179 | { | ||
180 | if (scsw_is_tm(scsw)) | ||
181 | return scsw->tm.cstat; | ||
182 | else | ||
183 | return scsw->cmd.cstat; | ||
184 | } | ||
185 | EXPORT_SYMBOL(scsw_cstat); | ||
186 | |||
187 | /** | ||
188 | * scsw_cmd_is_valid_key - check key field validity | ||
189 | * @scsw: pointer to scsw | ||
190 | * | ||
191 | * Return non-zero if the key field of the specified command mode scsw is | ||
192 | * valid, zero otherwise. | ||
193 | */ | ||
194 | int scsw_cmd_is_valid_key(union scsw *scsw) | ||
195 | { | ||
196 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
197 | } | ||
198 | EXPORT_SYMBOL(scsw_cmd_is_valid_key); | ||
199 | |||
200 | /** | ||
201 | * scsw_cmd_is_valid_sctl - check fctl field validity | ||
202 | * @scsw: pointer to scsw | ||
203 | * | ||
204 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
205 | * valid, zero otherwise. | ||
206 | */ | ||
207 | int scsw_cmd_is_valid_sctl(union scsw *scsw) | ||
208 | { | ||
209 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
210 | } | ||
211 | EXPORT_SYMBOL(scsw_cmd_is_valid_sctl); | ||
212 | |||
213 | /** | ||
214 | * scsw_cmd_is_valid_eswf - check eswf field validity | ||
215 | * @scsw: pointer to scsw | ||
216 | * | ||
217 | * Return non-zero if the eswf field of the specified command mode scsw is | ||
218 | * valid, zero otherwise. | ||
219 | */ | ||
220 | int scsw_cmd_is_valid_eswf(union scsw *scsw) | ||
221 | { | ||
222 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
223 | } | ||
224 | EXPORT_SYMBOL(scsw_cmd_is_valid_eswf); | ||
225 | |||
226 | /** | ||
227 | * scsw_cmd_is_valid_cc - check cc field validity | ||
228 | * @scsw: pointer to scsw | ||
229 | * | ||
230 | * Return non-zero if the cc field of the specified command mode scsw is | ||
231 | * valid, zero otherwise. | ||
232 | */ | ||
233 | int scsw_cmd_is_valid_cc(union scsw *scsw) | ||
234 | { | ||
235 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
236 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
237 | } | ||
238 | EXPORT_SYMBOL(scsw_cmd_is_valid_cc); | ||
239 | |||
240 | /** | ||
241 | * scsw_cmd_is_valid_fmt - check fmt field validity | ||
242 | * @scsw: pointer to scsw | ||
243 | * | ||
244 | * Return non-zero if the fmt field of the specified command mode scsw is | ||
245 | * valid, zero otherwise. | ||
246 | */ | ||
247 | int scsw_cmd_is_valid_fmt(union scsw *scsw) | ||
248 | { | ||
249 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
250 | } | ||
251 | EXPORT_SYMBOL(scsw_cmd_is_valid_fmt); | ||
252 | |||
253 | /** | ||
254 | * scsw_cmd_is_valid_pfch - check pfch field validity | ||
255 | * @scsw: pointer to scsw | ||
256 | * | ||
257 | * Return non-zero if the pfch field of the specified command mode scsw is | ||
258 | * valid, zero otherwise. | ||
259 | */ | ||
260 | int scsw_cmd_is_valid_pfch(union scsw *scsw) | ||
261 | { | ||
262 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
263 | } | ||
264 | EXPORT_SYMBOL(scsw_cmd_is_valid_pfch); | ||
265 | |||
266 | /** | ||
267 | * scsw_cmd_is_valid_isic - check isic field validity | ||
268 | * @scsw: pointer to scsw | ||
269 | * | ||
270 | * Return non-zero if the isic field of the specified command mode scsw is | ||
271 | * valid, zero otherwise. | ||
272 | */ | ||
273 | int scsw_cmd_is_valid_isic(union scsw *scsw) | ||
274 | { | ||
275 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
276 | } | ||
277 | EXPORT_SYMBOL(scsw_cmd_is_valid_isic); | ||
278 | |||
279 | /** | ||
280 | * scsw_cmd_is_valid_alcc - check alcc field validity | ||
281 | * @scsw: pointer to scsw | ||
282 | * | ||
283 | * Return non-zero if the alcc field of the specified command mode scsw is | ||
284 | * valid, zero otherwise. | ||
285 | */ | ||
286 | int scsw_cmd_is_valid_alcc(union scsw *scsw) | ||
287 | { | ||
288 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
289 | } | ||
290 | EXPORT_SYMBOL(scsw_cmd_is_valid_alcc); | ||
291 | |||
292 | /** | ||
293 | * scsw_cmd_is_valid_ssi - check ssi field validity | ||
294 | * @scsw: pointer to scsw | ||
295 | * | ||
296 | * Return non-zero if the ssi field of the specified command mode scsw is | ||
297 | * valid, zero otherwise. | ||
298 | */ | ||
299 | int scsw_cmd_is_valid_ssi(union scsw *scsw) | ||
300 | { | ||
301 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
302 | } | ||
303 | EXPORT_SYMBOL(scsw_cmd_is_valid_ssi); | ||
304 | |||
305 | /** | ||
306 | * scsw_cmd_is_valid_zcc - check zcc field validity | ||
307 | * @scsw: pointer to scsw | ||
308 | * | ||
309 | * Return non-zero if the zcc field of the specified command mode scsw is | ||
310 | * valid, zero otherwise. | ||
311 | */ | ||
312 | int scsw_cmd_is_valid_zcc(union scsw *scsw) | ||
313 | { | ||
314 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
315 | (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); | ||
316 | } | ||
317 | EXPORT_SYMBOL(scsw_cmd_is_valid_zcc); | ||
318 | |||
319 | /** | ||
320 | * scsw_cmd_is_valid_ectl - check ectl field validity | ||
321 | * @scsw: pointer to scsw | ||
322 | * | ||
323 | * Return non-zero if the ectl field of the specified command mode scsw is | ||
324 | * valid, zero otherwise. | ||
325 | */ | ||
326 | int scsw_cmd_is_valid_ectl(union scsw *scsw) | ||
327 | { | ||
328 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
329 | !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
330 | (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); | ||
331 | } | ||
332 | EXPORT_SYMBOL(scsw_cmd_is_valid_ectl); | ||
333 | |||
334 | /** | ||
335 | * scsw_cmd_is_valid_pno - check pno field validity | ||
336 | * @scsw: pointer to scsw | ||
337 | * | ||
338 | * Return non-zero if the pno field of the specified command mode scsw is | ||
339 | * valid, zero otherwise. | ||
340 | */ | ||
341 | int scsw_cmd_is_valid_pno(union scsw *scsw) | ||
342 | { | ||
343 | return (scsw->cmd.fctl != 0) && | ||
344 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
345 | (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || | ||
346 | ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
347 | (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); | ||
348 | } | ||
349 | EXPORT_SYMBOL(scsw_cmd_is_valid_pno); | ||
350 | |||
351 | /** | ||
352 | * scsw_cmd_is_valid_fctl - check fctl field validity | ||
353 | * @scsw: pointer to scsw | ||
354 | * | ||
355 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
356 | * valid, zero otherwise. | ||
357 | */ | ||
358 | int scsw_cmd_is_valid_fctl(union scsw *scsw) | ||
359 | { | ||
360 | /* Only valid if pmcw.dnv == 1 */ | ||
361 | return 1; | ||
362 | } | ||
363 | EXPORT_SYMBOL(scsw_cmd_is_valid_fctl); | ||
364 | |||
365 | /** | ||
366 | * scsw_cmd_is_valid_actl - check actl field validity | ||
367 | * @scsw: pointer to scsw | ||
368 | * | ||
369 | * Return non-zero if the actl field of the specified command mode scsw is | ||
370 | * valid, zero otherwise. | ||
371 | */ | ||
372 | int scsw_cmd_is_valid_actl(union scsw *scsw) | ||
373 | { | ||
374 | /* Only valid if pmcw.dnv == 1 */ | ||
375 | return 1; | ||
376 | } | ||
377 | EXPORT_SYMBOL(scsw_cmd_is_valid_actl); | ||
378 | |||
379 | /** | ||
380 | * scsw_cmd_is_valid_stctl - check stctl field validity | ||
381 | * @scsw: pointer to scsw | ||
382 | * | ||
383 | * Return non-zero if the stctl field of the specified command mode scsw is | ||
384 | * valid, zero otherwise. | ||
385 | */ | ||
386 | int scsw_cmd_is_valid_stctl(union scsw *scsw) | ||
387 | { | ||
388 | /* Only valid if pmcw.dnv == 1 */ | ||
389 | return 1; | ||
390 | } | ||
391 | EXPORT_SYMBOL(scsw_cmd_is_valid_stctl); | ||
392 | |||
393 | /** | ||
394 | * scsw_cmd_is_valid_dstat - check dstat field validity | ||
395 | * @scsw: pointer to scsw | ||
396 | * | ||
397 | * Return non-zero if the dstat field of the specified command mode scsw is | ||
398 | * valid, zero otherwise. | ||
399 | */ | ||
400 | int scsw_cmd_is_valid_dstat(union scsw *scsw) | ||
401 | { | ||
402 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
403 | (scsw->cmd.cc != 3); | ||
404 | } | ||
405 | EXPORT_SYMBOL(scsw_cmd_is_valid_dstat); | ||
406 | |||
407 | /** | ||
408 | * scsw_cmd_is_valid_cstat - check cstat field validity | ||
409 | * @scsw: pointer to scsw | ||
410 | * | ||
411 | * Return non-zero if the cstat field of the specified command mode scsw is | ||
412 | * valid, zero otherwise. | ||
413 | */ | ||
414 | int scsw_cmd_is_valid_cstat(union scsw *scsw) | ||
415 | { | ||
416 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
417 | (scsw->cmd.cc != 3); | ||
418 | } | ||
419 | EXPORT_SYMBOL(scsw_cmd_is_valid_cstat); | ||
420 | |||
421 | /** | ||
422 | * scsw_tm_is_valid_key - check key field validity | ||
423 | * @scsw: pointer to scsw | ||
424 | * | ||
425 | * Return non-zero if the key field of the specified transport mode scsw is | ||
426 | * valid, zero otherwise. | ||
427 | */ | ||
428 | int scsw_tm_is_valid_key(union scsw *scsw) | ||
429 | { | ||
430 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); | ||
431 | } | ||
432 | EXPORT_SYMBOL(scsw_tm_is_valid_key); | ||
433 | |||
434 | /** | ||
435 | * scsw_tm_is_valid_eswf - check eswf field validity | ||
436 | * @scsw: pointer to scsw | ||
437 | * | ||
438 | * Return non-zero if the eswf field of the specified transport mode scsw is | ||
439 | * valid, zero otherwise. | ||
440 | */ | ||
441 | int scsw_tm_is_valid_eswf(union scsw *scsw) | ||
442 | { | ||
443 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
444 | } | ||
445 | EXPORT_SYMBOL(scsw_tm_is_valid_eswf); | ||
446 | |||
447 | /** | ||
448 | * scsw_tm_is_valid_cc - check cc field validity | ||
449 | * @scsw: pointer to scsw | ||
450 | * | ||
451 | * Return non-zero if the cc field of the specified transport mode scsw is | ||
452 | * valid, zero otherwise. | ||
453 | */ | ||
454 | int scsw_tm_is_valid_cc(union scsw *scsw) | ||
455 | { | ||
456 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && | ||
457 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
458 | } | ||
459 | EXPORT_SYMBOL(scsw_tm_is_valid_cc); | ||
460 | |||
461 | /** | ||
462 | * scsw_tm_is_valid_fmt - check fmt field validity | ||
463 | * @scsw: pointer to scsw | ||
464 | * | ||
465 | * Return non-zero if the fmt field of the specified transport mode scsw is | ||
466 | * valid, zero otherwise. | ||
467 | */ | ||
468 | int scsw_tm_is_valid_fmt(union scsw *scsw) | ||
469 | { | ||
470 | return 1; | ||
471 | } | ||
472 | EXPORT_SYMBOL(scsw_tm_is_valid_fmt); | ||
473 | |||
474 | /** | ||
475 | * scsw_tm_is_valid_x - check x field validity | ||
476 | * @scsw: pointer to scsw | ||
477 | * | ||
478 | * Return non-zero if the x field of the specified transport mode scsw is | ||
479 | * valid, zero otherwise. | ||
480 | */ | ||
481 | int scsw_tm_is_valid_x(union scsw *scsw) | ||
482 | { | ||
483 | return 1; | ||
484 | } | ||
485 | EXPORT_SYMBOL(scsw_tm_is_valid_x); | ||
486 | |||
487 | /** | ||
488 | * scsw_tm_is_valid_q - check q field validity | ||
489 | * @scsw: pointer to scsw | ||
490 | * | ||
491 | * Return non-zero if the q field of the specified transport mode scsw is | ||
492 | * valid, zero otherwise. | ||
493 | */ | ||
494 | int scsw_tm_is_valid_q(union scsw *scsw) | ||
495 | { | ||
496 | return 1; | ||
497 | } | ||
498 | EXPORT_SYMBOL(scsw_tm_is_valid_q); | ||
499 | |||
500 | /** | ||
501 | * scsw_tm_is_valid_ectl - check ectl field validity | ||
502 | * @scsw: pointer to scsw | ||
503 | * | ||
504 | * Return non-zero if the ectl field of the specified transport mode scsw is | ||
505 | * valid, zero otherwise. | ||
506 | */ | ||
507 | int scsw_tm_is_valid_ectl(union scsw *scsw) | ||
508 | { | ||
509 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
510 | !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
511 | (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); | ||
512 | } | ||
513 | EXPORT_SYMBOL(scsw_tm_is_valid_ectl); | ||
514 | |||
515 | /** | ||
516 | * scsw_tm_is_valid_pno - check pno field validity | ||
517 | * @scsw: pointer to scsw | ||
518 | * | ||
519 | * Return non-zero if the pno field of the specified transport mode scsw is | ||
520 | * valid, zero otherwise. | ||
521 | */ | ||
522 | int scsw_tm_is_valid_pno(union scsw *scsw) | ||
523 | { | ||
524 | return (scsw->tm.fctl != 0) && | ||
525 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
526 | (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || | ||
527 | ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
528 | (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); | ||
529 | } | ||
530 | EXPORT_SYMBOL(scsw_tm_is_valid_pno); | ||
531 | |||
532 | /** | ||
533 | * scsw_tm_is_valid_fctl - check fctl field validity | ||
534 | * @scsw: pointer to scsw | ||
535 | * | ||
536 | * Return non-zero if the fctl field of the specified transport mode scsw is | ||
537 | * valid, zero otherwise. | ||
538 | */ | ||
539 | int scsw_tm_is_valid_fctl(union scsw *scsw) | ||
540 | { | ||
541 | /* Only valid if pmcw.dnv == 1 */ | ||
542 | return 1; | ||
543 | } | ||
544 | EXPORT_SYMBOL(scsw_tm_is_valid_fctl); | ||
545 | |||
546 | /** | ||
547 | * scsw_tm_is_valid_actl - check actl field validity | ||
548 | * @scsw: pointer to scsw | ||
549 | * | ||
550 | * Return non-zero if the actl field of the specified transport mode scsw is | ||
551 | * valid, zero otherwise. | ||
552 | */ | ||
553 | int scsw_tm_is_valid_actl(union scsw *scsw) | ||
554 | { | ||
555 | /* Only valid if pmcw.dnv == 1 */ | ||
556 | return 1; | ||
557 | } | ||
558 | EXPORT_SYMBOL(scsw_tm_is_valid_actl); | ||
559 | |||
560 | /** | ||
561 | * scsw_tm_is_valid_stctl - check stctl field validity | ||
562 | * @scsw: pointer to scsw | ||
563 | * | ||
564 | * Return non-zero if the stctl field of the specified transport mode scsw is | ||
565 | * valid, zero otherwise. | ||
566 | */ | ||
567 | int scsw_tm_is_valid_stctl(union scsw *scsw) | ||
568 | { | ||
569 | /* Only valid if pmcw.dnv == 1 */ | ||
570 | return 1; | ||
571 | } | ||
572 | EXPORT_SYMBOL(scsw_tm_is_valid_stctl); | ||
573 | |||
574 | /** | ||
575 | * scsw_tm_is_valid_dstat - check dstat field validity | ||
576 | * @scsw: pointer to scsw | ||
577 | * | ||
578 | * Return non-zero if the dstat field of the specified transport mode scsw is | ||
579 | * valid, zero otherwise. | ||
580 | */ | ||
581 | int scsw_tm_is_valid_dstat(union scsw *scsw) | ||
582 | { | ||
583 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
584 | (scsw->tm.cc != 3); | ||
585 | } | ||
586 | EXPORT_SYMBOL(scsw_tm_is_valid_dstat); | ||
587 | |||
588 | /** | ||
589 | * scsw_tm_is_valid_cstat - check cstat field validity | ||
590 | * @scsw: pointer to scsw | ||
591 | * | ||
592 | * Return non-zero if the cstat field of the specified transport mode scsw is | ||
593 | * valid, zero otherwise. | ||
594 | */ | ||
595 | int scsw_tm_is_valid_cstat(union scsw *scsw) | ||
596 | { | ||
597 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
598 | (scsw->tm.cc != 3); | ||
599 | } | ||
600 | EXPORT_SYMBOL(scsw_tm_is_valid_cstat); | ||
601 | |||
602 | /** | ||
603 | * scsw_tm_is_valid_fcxs - check fcxs field validity | ||
604 | * @scsw: pointer to scsw | ||
605 | * | ||
606 | * Return non-zero if the fcxs field of the specified transport mode scsw is | ||
607 | * valid, zero otherwise. | ||
608 | */ | ||
609 | int scsw_tm_is_valid_fcxs(union scsw *scsw) | ||
610 | { | ||
611 | return 1; | ||
612 | } | ||
613 | EXPORT_SYMBOL(scsw_tm_is_valid_fcxs); | ||
614 | |||
615 | /** | ||
616 | * scsw_tm_is_valid_schxs - check schxs field validity | ||
617 | * @scsw: pointer to scsw | ||
618 | * | ||
619 | * Return non-zero if the schxs field of the specified transport mode scsw is | ||
620 | * valid, zero otherwise. | ||
621 | */ | ||
622 | int scsw_tm_is_valid_schxs(union scsw *scsw) | ||
623 | { | ||
624 | return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | | ||
625 | SCHN_STAT_INTF_CTRL_CHK | | ||
626 | SCHN_STAT_PROT_CHECK | | ||
627 | SCHN_STAT_CHN_DATA_CHK)); | ||
628 | } | ||
629 | EXPORT_SYMBOL(scsw_tm_is_valid_schxs); | ||
630 | |||
631 | /** | ||
632 | * scsw_is_valid_actl - check actl field validity | ||
633 | * @scsw: pointer to scsw | ||
634 | * | ||
635 | * Return non-zero if the actl field of the specified scsw is valid, | ||
636 | * regardless of whether it is a transport mode or command mode scsw. | ||
637 | * Return zero if the field does not contain a valid value. | ||
638 | */ | ||
639 | int scsw_is_valid_actl(union scsw *scsw) | ||
640 | { | ||
641 | if (scsw_is_tm(scsw)) | ||
642 | return scsw_tm_is_valid_actl(scsw); | ||
643 | else | ||
644 | return scsw_cmd_is_valid_actl(scsw); | ||
645 | } | ||
646 | EXPORT_SYMBOL(scsw_is_valid_actl); | ||
647 | |||
648 | /** | ||
649 | * scsw_is_valid_cc - check cc field validity | ||
650 | * @scsw: pointer to scsw | ||
651 | * | ||
652 | * Return non-zero if the cc field of the specified scsw is valid, | ||
653 | * regardless of whether it is a transport mode or command mode scsw. | ||
654 | * Return zero if the field does not contain a valid value. | ||
655 | */ | ||
656 | int scsw_is_valid_cc(union scsw *scsw) | ||
657 | { | ||
658 | if (scsw_is_tm(scsw)) | ||
659 | return scsw_tm_is_valid_cc(scsw); | ||
660 | else | ||
661 | return scsw_cmd_is_valid_cc(scsw); | ||
662 | } | ||
663 | EXPORT_SYMBOL(scsw_is_valid_cc); | ||
664 | |||
665 | /** | ||
666 | * scsw_is_valid_cstat - check cstat field validity | ||
667 | * @scsw: pointer to scsw | ||
668 | * | ||
669 | * Return non-zero if the cstat field of the specified scsw is valid, | ||
670 | * regardless of whether it is a transport mode or command mode scsw. | ||
671 | * Return zero if the field does not contain a valid value. | ||
672 | */ | ||
673 | int scsw_is_valid_cstat(union scsw *scsw) | ||
674 | { | ||
675 | if (scsw_is_tm(scsw)) | ||
676 | return scsw_tm_is_valid_cstat(scsw); | ||
677 | else | ||
678 | return scsw_cmd_is_valid_cstat(scsw); | ||
679 | } | ||
680 | EXPORT_SYMBOL(scsw_is_valid_cstat); | ||
681 | |||
682 | /** | ||
683 | * scsw_is_valid_dstat - check dstat field validity | ||
684 | * @scsw: pointer to scsw | ||
685 | * | ||
686 | * Return non-zero if the dstat field of the specified scsw is valid, | ||
687 | * regardless of whether it is a transport mode or command mode scsw. | ||
688 | * Return zero if the field does not contain a valid value. | ||
689 | */ | ||
690 | int scsw_is_valid_dstat(union scsw *scsw) | ||
691 | { | ||
692 | if (scsw_is_tm(scsw)) | ||
693 | return scsw_tm_is_valid_dstat(scsw); | ||
694 | else | ||
695 | return scsw_cmd_is_valid_dstat(scsw); | ||
696 | } | ||
697 | EXPORT_SYMBOL(scsw_is_valid_dstat); | ||
698 | |||
699 | /** | ||
700 | * scsw_is_valid_ectl - check ectl field validity | ||
701 | * @scsw: pointer to scsw | ||
702 | * | ||
703 | * Return non-zero if the ectl field of the specified scsw is valid, | ||
704 | * regardless of whether it is a transport mode or command mode scsw. | ||
705 | * Return zero if the field does not contain a valid value. | ||
706 | */ | ||
707 | int scsw_is_valid_ectl(union scsw *scsw) | ||
708 | { | ||
709 | if (scsw_is_tm(scsw)) | ||
710 | return scsw_tm_is_valid_ectl(scsw); | ||
711 | else | ||
712 | return scsw_cmd_is_valid_ectl(scsw); | ||
713 | } | ||
714 | EXPORT_SYMBOL(scsw_is_valid_ectl); | ||
715 | |||
716 | /** | ||
717 | * scsw_is_valid_eswf - check eswf field validity | ||
718 | * @scsw: pointer to scsw | ||
719 | * | ||
720 | * Return non-zero if the eswf field of the specified scsw is valid, | ||
721 | * regardless of whether it is a transport mode or command mode scsw. | ||
722 | * Return zero if the field does not contain a valid value. | ||
723 | */ | ||
724 | int scsw_is_valid_eswf(union scsw *scsw) | ||
725 | { | ||
726 | if (scsw_is_tm(scsw)) | ||
727 | return scsw_tm_is_valid_eswf(scsw); | ||
728 | else | ||
729 | return scsw_cmd_is_valid_eswf(scsw); | ||
730 | } | ||
731 | EXPORT_SYMBOL(scsw_is_valid_eswf); | ||
732 | |||
733 | /** | ||
734 | * scsw_is_valid_fctl - check fctl field validity | ||
735 | * @scsw: pointer to scsw | ||
736 | * | ||
737 | * Return non-zero if the fctl field of the specified scsw is valid, | ||
738 | * regardless of whether it is a transport mode or command mode scsw. | ||
739 | * Return zero if the field does not contain a valid value. | ||
740 | */ | ||
741 | int scsw_is_valid_fctl(union scsw *scsw) | ||
742 | { | ||
743 | if (scsw_is_tm(scsw)) | ||
744 | return scsw_tm_is_valid_fctl(scsw); | ||
745 | else | ||
746 | return scsw_cmd_is_valid_fctl(scsw); | ||
747 | } | ||
748 | EXPORT_SYMBOL(scsw_is_valid_fctl); | ||
749 | |||
750 | /** | ||
751 | * scsw_is_valid_key - check key field validity | ||
752 | * @scsw: pointer to scsw | ||
753 | * | ||
754 | * Return non-zero if the key field of the specified scsw is valid, | ||
755 | * regardless of whether it is a transport mode or command mode scsw. | ||
756 | * Return zero if the field does not contain a valid value. | ||
757 | */ | ||
758 | int scsw_is_valid_key(union scsw *scsw) | ||
759 | { | ||
760 | if (scsw_is_tm(scsw)) | ||
761 | return scsw_tm_is_valid_key(scsw); | ||
762 | else | ||
763 | return scsw_cmd_is_valid_key(scsw); | ||
764 | } | ||
765 | EXPORT_SYMBOL(scsw_is_valid_key); | ||
766 | |||
767 | /** | ||
768 | * scsw_is_valid_pno - check pno field validity | ||
769 | * @scsw: pointer to scsw | ||
770 | * | ||
771 | * Return non-zero if the pno field of the specified scsw is valid, | ||
772 | * regardless of whether it is a transport mode or command mode scsw. | ||
773 | * Return zero if the field does not contain a valid value. | ||
774 | */ | ||
775 | int scsw_is_valid_pno(union scsw *scsw) | ||
776 | { | ||
777 | if (scsw_is_tm(scsw)) | ||
778 | return scsw_tm_is_valid_pno(scsw); | ||
779 | else | ||
780 | return scsw_cmd_is_valid_pno(scsw); | ||
781 | } | ||
782 | EXPORT_SYMBOL(scsw_is_valid_pno); | ||
783 | |||
784 | /** | ||
785 | * scsw_is_valid_stctl - check stctl field validity | ||
786 | * @scsw: pointer to scsw | ||
787 | * | ||
788 | * Return non-zero if the stctl field of the specified scsw is valid, | ||
789 | * regardless of whether it is a transport mode or command mode scsw. | ||
790 | * Return zero if the field does not contain a valid value. | ||
791 | */ | ||
792 | int scsw_is_valid_stctl(union scsw *scsw) | ||
793 | { | ||
794 | if (scsw_is_tm(scsw)) | ||
795 | return scsw_tm_is_valid_stctl(scsw); | ||
796 | else | ||
797 | return scsw_cmd_is_valid_stctl(scsw); | ||
798 | } | ||
799 | EXPORT_SYMBOL(scsw_is_valid_stctl); | ||
800 | |||
801 | /** | ||
802 | * scsw_cmd_is_solicited - check for solicited scsw | ||
803 | * @scsw: pointer to scsw | ||
804 | * | ||
805 | * Return non-zero if the command mode scsw indicates that the associated | ||
806 | * status condition is solicited, zero if it is unsolicited. | ||
807 | */ | ||
808 | int scsw_cmd_is_solicited(union scsw *scsw) | ||
809 | { | ||
810 | return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != | ||
811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
812 | } | ||
813 | EXPORT_SYMBOL(scsw_cmd_is_solicited); | ||
814 | |||
815 | /** | ||
816 | * scsw_tm_is_solicited - check for solicited scsw | ||
817 | * @scsw: pointer to scsw | ||
818 | * | ||
819 | * Return non-zero if the transport mode scsw indicates that the associated | ||
820 | * status condition is solicited, zero if it is unsolicited. | ||
821 | */ | ||
822 | int scsw_tm_is_solicited(union scsw *scsw) | ||
823 | { | ||
824 | return (scsw->tm.cc != 0) || (scsw->tm.stctl != | ||
825 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
826 | } | ||
827 | EXPORT_SYMBOL(scsw_tm_is_solicited); | ||
828 | |||
829 | /** | ||
830 | * scsw_is_solicited - check for solicited scsw | ||
831 | * @scsw: pointer to scsw | ||
832 | * | ||
833 | * Return non-zero if the transport or command mode scsw indicates that the | ||
834 | * associated status condition is solicited, zero if it is unsolicited. | ||
835 | */ | ||
836 | int scsw_is_solicited(union scsw *scsw) | ||
837 | { | ||
838 | if (scsw_is_tm(scsw)) | ||
839 | return scsw_tm_is_solicited(scsw); | ||
840 | else | ||
841 | return scsw_cmd_is_solicited(scsw); | ||
842 | } | ||
843 | EXPORT_SYMBOL(scsw_is_solicited); | ||
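The scsw helpers above give device drivers a mode-independent way to read and validate subchannel status words. A minimal sketch of how a ccw interrupt handler might use them, assuming the s390 common I/O conventions (the handler name and its body are illustrative only, not part of this patch):

	#include <asm/cio.h>	/* union scsw, struct irb, DEV_STAT_*, SCSW_* */

	/* Hypothetical ccw_device interrupt handler built on the scsw helpers. */
	static void sample_irq_handler(struct ccw_device *cdev,
				       unsigned long intparm, struct irb *irb)
	{
		union scsw *scsw = &irb->scsw;

		if (!scsw_is_solicited(scsw))
			return;	/* unsolicited status: nothing we started */

		/* Only trust dstat when the helpers say it is valid. */
		if (scsw_is_valid_dstat(scsw) &&
		    (scsw_dstat(scsw) & DEV_STAT_UNIT_CHECK)) {
			/* unit check: sense data handling would go here */
		}

		/* fctl/actl report which function is (still) in progress. */
		if ((scsw_fctl(scsw) & SCSW_FCTL_START_FUNC) &&
		    (scsw_actl(scsw) & SCSW_ACTL_START_PEND)) {
			/* start function still pending */
		}
	}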
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ed3dcdea7fe1..090b32a339c6 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -648,7 +648,9 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state) | |||
648 | /* Poll on the device until all requests are finished. */ | 648 | /* Poll on the device until all requests are finished. */ |
649 | do { | 649 | do { |
650 | flags = 0; | 650 | flags = 0; |
651 | spin_lock_bh(&ap_dev->lock); | ||
651 | __ap_poll_device(ap_dev, &flags); | 652 | __ap_poll_device(ap_dev, &flags); |
653 | spin_unlock_bh(&ap_dev->lock); | ||
652 | } while ((flags & 1) || (flags & 2)); | 654 | } while ((flags & 1) || (flags & 2)); |
653 | 655 | ||
654 | ap_device_remove(dev); | 656 | ap_device_remove(dev); |
@@ -1109,12 +1111,15 @@ static void ap_scan_bus(struct work_struct *unused) | |||
1109 | 1111 | ||
1110 | ap_dev->device.bus = &ap_bus_type; | 1112 | ap_dev->device.bus = &ap_bus_type; |
1111 | ap_dev->device.parent = ap_root_device; | 1113 | ap_dev->device.parent = ap_root_device; |
1112 | dev_set_name(&ap_dev->device, "card%02x", | 1114 | if (dev_set_name(&ap_dev->device, "card%02x", |
1113 | AP_QID_DEVICE(ap_dev->qid)); | 1115 | AP_QID_DEVICE(ap_dev->qid))) { |
1116 | kfree(ap_dev); | ||
1117 | continue; | ||
1118 | } | ||
1114 | ap_dev->device.release = ap_device_release; | 1119 | ap_dev->device.release = ap_device_release; |
1115 | rc = device_register(&ap_dev->device); | 1120 | rc = device_register(&ap_dev->device); |
1116 | if (rc) { | 1121 | if (rc) { |
1117 | kfree(ap_dev); | 1122 | put_device(&ap_dev->device); |
1118 | continue; | 1123 | continue; |
1119 | } | 1124 | } |
1120 | /* Add device attributes. */ | 1125 | /* Add device attributes. */ |
@@ -1407,14 +1412,12 @@ static void ap_reset(struct ap_device *ap_dev) | |||
1407 | 1412 | ||
1408 | static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) | 1413 | static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) |
1409 | { | 1414 | { |
1410 | spin_lock(&ap_dev->lock); | ||
1411 | if (!ap_dev->unregistered) { | 1415 | if (!ap_dev->unregistered) { |
1412 | if (ap_poll_queue(ap_dev, flags)) | 1416 | if (ap_poll_queue(ap_dev, flags)) |
1413 | ap_dev->unregistered = 1; | 1417 | ap_dev->unregistered = 1; |
1414 | if (ap_dev->reset == AP_RESET_DO) | 1418 | if (ap_dev->reset == AP_RESET_DO) |
1415 | ap_reset(ap_dev); | 1419 | ap_reset(ap_dev); |
1416 | } | 1420 | } |
1417 | spin_unlock(&ap_dev->lock); | ||
1418 | return 0; | 1421 | return 0; |
1419 | } | 1422 | } |
1420 | 1423 | ||
@@ -1441,7 +1444,9 @@ static void ap_poll_all(unsigned long dummy) | |||
1441 | flags = 0; | 1444 | flags = 0; |
1442 | spin_lock(&ap_device_list_lock); | 1445 | spin_lock(&ap_device_list_lock); |
1443 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1446 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1447 | spin_lock(&ap_dev->lock); | ||
1444 | __ap_poll_device(ap_dev, &flags); | 1448 | __ap_poll_device(ap_dev, &flags); |
1449 | spin_unlock(&ap_dev->lock); | ||
1445 | } | 1450 | } |
1446 | spin_unlock(&ap_device_list_lock); | 1451 | spin_unlock(&ap_device_list_lock); |
1447 | } while (flags & 1); | 1452 | } while (flags & 1); |
@@ -1487,7 +1492,9 @@ static int ap_poll_thread(void *data) | |||
1487 | flags = 0; | 1492 | flags = 0; |
1488 | spin_lock_bh(&ap_device_list_lock); | 1493 | spin_lock_bh(&ap_device_list_lock); |
1489 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1494 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1495 | spin_lock(&ap_dev->lock); | ||
1490 | __ap_poll_device(ap_dev, &flags); | 1496 | __ap_poll_device(ap_dev, &flags); |
1497 | spin_unlock(&ap_dev->lock); | ||
1491 | } | 1498 | } |
1492 | spin_unlock_bh(&ap_device_list_lock); | 1499 | spin_unlock_bh(&ap_device_list_lock); |
1493 | } | 1500 | } |
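After these hunks, __ap_poll_device() no longer takes ap_dev->lock itself; every caller is expected to hold it for the duration of the call. A sketch of the resulting calling convention, mirroring the three call sites changed above (illustrative only):

	/* process context (suspend path, poll thread): */
	spin_lock_bh(&ap_dev->lock);
	__ap_poll_device(ap_dev, &flags);
	spin_unlock_bh(&ap_dev->lock);

	/* timer context (ap_poll_all): plain spin_lock() is sufficient */
	spin_lock(&ap_dev->lock);
	__ap_poll_device(ap_dev, &flags);
	spin_unlock(&ap_dev->lock);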
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index e38e5d306faf..2930fc763ac5 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c | |||
@@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
403 | return len; | 403 | return len; |
404 | } | 404 | } |
405 | 405 | ||
406 | void __init s390_virtio_console_init(void) | 406 | static int __init s390_virtio_console_init(void) |
407 | { | 407 | { |
408 | virtio_cons_early_init(early_put_chars); | 408 | if (!MACHINE_IS_KVM) |
409 | return -ENODEV; | ||
410 | return virtio_cons_early_init(early_put_chars); | ||
409 | } | 411 | } |
412 | console_initcall(s390_virtio_console_init); | ||
413 | |||
410 | 414 | ||
411 | /* | 415 | /* |
412 | * We do this after core stuff, but before the drivers. | 416 | * We do this after core stuff, but before the drivers. |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 271c4a82e84b..9215fbbccc08 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1838,9 +1838,10 @@ static int netiucv_register_device(struct net_device *ndev) | |||
1838 | return -ENOMEM; | 1838 | return -ENOMEM; |
1839 | 1839 | ||
1840 | ret = device_register(dev); | 1840 | ret = device_register(dev); |
1841 | 1841 | if (ret) { | |
1842 | if (ret) | 1842 | put_device(dev); |
1843 | return ret; | 1843 | return ret; |
1844 | } | ||
1844 | ret = netiucv_add_files(dev); | 1845 | ret = netiucv_add_files(dev); |
1845 | if (ret) | 1846 | if (ret) |
1846 | goto out_unreg; | 1847 | goto out_unreg; |
@@ -2225,8 +2226,10 @@ static int __init netiucv_init(void) | |||
2225 | netiucv_dev->release = (void (*)(struct device *))kfree; | 2226 | netiucv_dev->release = (void (*)(struct device *))kfree; |
2226 | netiucv_dev->driver = &netiucv_driver; | 2227 | netiucv_dev->driver = &netiucv_driver; |
2227 | rc = device_register(netiucv_dev); | 2228 | rc = device_register(netiucv_dev); |
2228 | if (rc) | 2229 | if (rc) { |
2230 | put_device(netiucv_dev); | ||
2229 | goto out_driver; | 2231 | goto out_driver; |
2232 | } | ||
2230 | netiucv_banner(); | 2233 | netiucv_banner(); |
2231 | return rc; | 2234 | return rc; |
2232 | 2235 | ||
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index e76a320d373b..102000d1af6f 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -219,13 +219,13 @@ static int __init smsg_init(void) | |||
219 | smsg_dev->driver = &smsg_driver; | 219 | smsg_dev->driver = &smsg_driver; |
220 | rc = device_register(smsg_dev); | 220 | rc = device_register(smsg_dev); |
221 | if (rc) | 221 | if (rc) |
222 | goto out_free_dev; | 222 | goto out_put; |
223 | 223 | ||
224 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 224 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
225 | return 0; | 225 | return 0; |
226 | 226 | ||
227 | out_free_dev: | 227 | out_put: |
228 | kfree(smsg_dev); | 228 | put_device(smsg_dev); |
229 | out_free_path: | 229 | out_free_path: |
230 | iucv_path_free(smsg_path); | 230 | iucv_path_free(smsg_path); |
231 | smsg_path = NULL; | 231 | smsg_path = NULL; |
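The netiucv and smsgiucv hunks above replace kfree() with put_device() on the device_register() error path. Once device_register() has been called, the embedded kobject owns the structure and it may only be freed through its release callback, so the last reference must be dropped with put_device() rather than freed directly. A generic sketch of the pattern (names are hypothetical):

	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev_set_name(dev, "example0");
	dev->release = example_release;	/* release callback frees dev */
	rc = device_register(dev);
	if (rc) {
		/* after device_register(), only put_device() may free dev */
		put_device(dev);
		return rc;
	}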
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index 15dab96d05e3..7c815d3327f7 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c | |||
@@ -537,8 +537,12 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp) | |||
537 | } | 537 | } |
538 | if (temp_index != 0 && fan_index != 0) { | 538 | if (temp_index != 0 && fan_index != 0) { |
539 | kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld"); | 539 | kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld"); |
540 | if (IS_ERR(kenvctrld_task)) | 540 | if (IS_ERR(kenvctrld_task)) { |
541 | return PTR_ERR(kenvctrld_task); | 541 | int err = PTR_ERR(kenvctrld_task); |
542 | |||
543 | kenvctrld_task = NULL; | ||
544 | return err; | ||
545 | } | ||
542 | } | 546 | } |
543 | 547 | ||
544 | return 0; | 548 | return 0; |
@@ -561,7 +565,8 @@ void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) | |||
561 | struct bbc_cpu_temperature *tp, *tpos; | 565 | struct bbc_cpu_temperature *tp, *tpos; |
562 | struct bbc_fan_control *fp, *fpos; | 566 | struct bbc_fan_control *fp, *fpos; |
563 | 567 | ||
564 | kthread_stop(kenvctrld_task); | 568 | if (kenvctrld_task) |
569 | kthread_stop(kenvctrld_task); | ||
565 | 570 | ||
566 | list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { | 571 | list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { |
567 | list_del(&tp->bp_list); | 572 | list_del(&tp->bp_list); |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c index 042d9bce9914..d0ab23a58355 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_init.c +++ b/drivers/scsi/cxgb3i/cxgb3i_init.c | |||
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION); | |||
26 | 26 | ||
27 | static void open_s3_dev(struct t3cdev *); | 27 | static void open_s3_dev(struct t3cdev *); |
28 | static void close_s3_dev(struct t3cdev *); | 28 | static void close_s3_dev(struct t3cdev *); |
29 | static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error); | 29 | static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port); |
30 | 30 | ||
31 | static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; | 31 | static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; |
32 | static struct cxgb3_client t3c_client = { | 32 | static struct cxgb3_client t3c_client = { |
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = { | |||
34 | .handlers = cxgb3i_cpl_handlers, | 34 | .handlers = cxgb3i_cpl_handlers, |
35 | .add = open_s3_dev, | 35 | .add = open_s3_dev, |
36 | .remove = close_s3_dev, | 36 | .remove = close_s3_dev, |
37 | .err_handler = s3_err_handler, | 37 | .event_handler = s3_event_handler, |
38 | }; | 38 | }; |
39 | 39 | ||
40 | /** | 40 | /** |
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev) | |||
66 | cxgb3i_ddp_cleanup(t3dev); | 66 | cxgb3i_ddp_cleanup(t3dev); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error) | 69 | static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port) |
70 | { | 70 | { |
71 | struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev); | 71 | struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev); |
72 | 72 | ||
73 | cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n", | 73 | cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n", |
74 | snic, tdev, status, error); | 74 | snic, tdev, event, port); |
75 | if (!snic) | 75 | if (!snic) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | switch (status) { | 78 | switch (event) { |
79 | case OFFLOAD_STATUS_DOWN: | 79 | case OFFLOAD_STATUS_DOWN: |
80 | snic->flags |= CXGB3I_ADAPTER_FLAG_RESET; | 80 | snic->flags |= CXGB3I_ADAPTER_FLAG_RESET; |
81 | break; | 81 | break; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index f3da592f7bcc..35a13867495e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work) | |||
119 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | 119 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
120 | } | 120 | } |
121 | 121 | ||
122 | /** | ||
123 | * mpt2sas_base_start_watchdog - start the fault_reset_work_q | ||
124 | * @ioc: per adapter object | ||
125 | * Context: sleep. | ||
126 | * | ||
127 | * Return nothing. | ||
128 | */ | ||
129 | void | ||
130 | mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc) | ||
131 | { | ||
132 | unsigned long flags; | ||
133 | |||
134 | if (ioc->fault_reset_work_q) | ||
135 | return; | ||
136 | |||
137 | /* initialize fault polling */ | ||
138 | INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); | ||
139 | snprintf(ioc->fault_reset_work_q_name, | ||
140 | sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); | ||
141 | ioc->fault_reset_work_q = | ||
142 | create_singlethread_workqueue(ioc->fault_reset_work_q_name); | ||
143 | if (!ioc->fault_reset_work_q) { | ||
144 | printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n", | ||
145 | ioc->name, __func__, __LINE__); | ||
146 | return; | ||
147 | } | ||
148 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | ||
149 | if (ioc->fault_reset_work_q) | ||
150 | queue_delayed_work(ioc->fault_reset_work_q, | ||
151 | &ioc->fault_reset_work, | ||
152 | msecs_to_jiffies(FAULT_POLLING_INTERVAL)); | ||
153 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q | ||
158 | * @ioc: per adapter object | ||
159 | * Context: sleep. | ||
160 | * | ||
161 | * Return nothing. | ||
162 | */ | ||
163 | void | ||
164 | mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc) | ||
165 | { | ||
166 | unsigned long flags; | ||
167 | struct workqueue_struct *wq; | ||
168 | |||
169 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | ||
170 | wq = ioc->fault_reset_work_q; | ||
171 | ioc->fault_reset_work_q = NULL; | ||
172 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | ||
173 | if (wq) { | ||
174 | if (!cancel_delayed_work(&ioc->fault_reset_work)) | ||
175 | flush_workqueue(wq); | ||
176 | destroy_workqueue(wq); | ||
177 | } | ||
178 | } | ||
179 | |||
122 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 180 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
123 | /** | 181 | /** |
124 | * _base_sas_ioc_info - verbose translation of the ioc status | 182 | * _base_sas_ioc_info - verbose translation of the ioc status |
@@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) | |||
440 | if (sas_loginfo.dw.bus_type != 3 /*SAS*/) | 498 | if (sas_loginfo.dw.bus_type != 3 /*SAS*/) |
441 | return; | 499 | return; |
442 | 500 | ||
501 | /* each nexus loss loginfo */ | ||
502 | if (log_info == 0x31170000) | ||
503 | return; | ||
504 | |||
443 | /* eat the loginfos associated with task aborts */ | 505 | /* eat the loginfos associated with task aborts */ |
444 | if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == | 506 | if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == |
445 | 0x31140000 || log_info == 0x31130000)) | 507 | 0x31140000 || log_info == 0x31130000)) |
@@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) | |||
1109 | } | 1171 | } |
1110 | } | 1172 | } |
1111 | 1173 | ||
1112 | pci_set_drvdata(pdev, ioc->shost); | ||
1113 | _base_mask_interrupts(ioc); | 1174 | _base_mask_interrupts(ioc); |
1114 | r = _base_enable_msix(ioc); | 1175 | r = _base_enable_msix(ioc); |
1115 | if (r) | 1176 | if (r) |
@@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) | |||
1132 | ioc->pci_irq = -1; | 1193 | ioc->pci_irq = -1; |
1133 | pci_release_selected_regions(ioc->pdev, ioc->bars); | 1194 | pci_release_selected_regions(ioc->pdev, ioc->bars); |
1134 | pci_disable_device(pdev); | 1195 | pci_disable_device(pdev); |
1135 | pci_set_drvdata(pdev, NULL); | ||
1136 | return r; | 1196 | return r; |
1137 | } | 1197 | } |
1138 | 1198 | ||
@@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) | |||
3191 | ioc->chip_phys = 0; | 3251 | ioc->chip_phys = 0; |
3192 | pci_release_selected_regions(ioc->pdev, ioc->bars); | 3252 | pci_release_selected_regions(ioc->pdev, ioc->bars); |
3193 | pci_disable_device(pdev); | 3253 | pci_disable_device(pdev); |
3194 | pci_set_drvdata(pdev, NULL); | ||
3195 | return; | 3254 | return; |
3196 | } | 3255 | } |
3197 | 3256 | ||
@@ -3205,7 +3264,6 @@ int | |||
3205 | mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | 3264 | mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) |
3206 | { | 3265 | { |
3207 | int r, i; | 3266 | int r, i; |
3208 | unsigned long flags; | ||
3209 | 3267 | ||
3210 | dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, | 3268 | dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, |
3211 | __func__)); | 3269 | __func__)); |
@@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
3214 | if (r) | 3272 | if (r) |
3215 | return r; | 3273 | return r; |
3216 | 3274 | ||
3275 | pci_set_drvdata(ioc->pdev, ioc->shost); | ||
3217 | r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); | 3276 | r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); |
3218 | if (r) | 3277 | if (r) |
3219 | goto out_free_resources; | 3278 | goto out_free_resources; |
@@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
3288 | if (r) | 3347 | if (r) |
3289 | goto out_free_resources; | 3348 | goto out_free_resources; |
3290 | 3349 | ||
3291 | /* initialize fault polling */ | 3350 | mpt2sas_base_start_watchdog(ioc); |
3292 | INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); | ||
3293 | snprintf(ioc->fault_reset_work_q_name, | ||
3294 | sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); | ||
3295 | ioc->fault_reset_work_q = | ||
3296 | create_singlethread_workqueue(ioc->fault_reset_work_q_name); | ||
3297 | if (!ioc->fault_reset_work_q) { | ||
3298 | printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n", | ||
3299 | ioc->name, __func__, __LINE__); | ||
3300 | goto out_free_resources; | ||
3301 | } | ||
3302 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | ||
3303 | if (ioc->fault_reset_work_q) | ||
3304 | queue_delayed_work(ioc->fault_reset_work_q, | ||
3305 | &ioc->fault_reset_work, | ||
3306 | msecs_to_jiffies(FAULT_POLLING_INTERVAL)); | ||
3307 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | ||
3308 | return 0; | 3351 | return 0; |
3309 | 3352 | ||
3310 | out_free_resources: | 3353 | out_free_resources: |
@@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
3312 | ioc->remove_host = 1; | 3355 | ioc->remove_host = 1; |
3313 | mpt2sas_base_free_resources(ioc); | 3356 | mpt2sas_base_free_resources(ioc); |
3314 | _base_release_memory_pools(ioc); | 3357 | _base_release_memory_pools(ioc); |
3358 | pci_set_drvdata(ioc->pdev, NULL); | ||
3315 | kfree(ioc->tm_cmds.reply); | 3359 | kfree(ioc->tm_cmds.reply); |
3316 | kfree(ioc->transport_cmds.reply); | 3360 | kfree(ioc->transport_cmds.reply); |
3317 | kfree(ioc->config_cmds.reply); | 3361 | kfree(ioc->config_cmds.reply); |
@@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
3337 | void | 3381 | void |
3338 | mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) | 3382 | mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) |
3339 | { | 3383 | { |
3340 | unsigned long flags; | ||
3341 | struct workqueue_struct *wq; | ||
3342 | 3384 | ||
3343 | dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, | 3385 | dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, |
3344 | __func__)); | 3386 | __func__)); |
3345 | 3387 | ||
3346 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 3388 | mpt2sas_base_stop_watchdog(ioc); |
3347 | wq = ioc->fault_reset_work_q; | ||
3348 | ioc->fault_reset_work_q = NULL; | ||
3349 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | ||
3350 | if (!cancel_delayed_work(&ioc->fault_reset_work)) | ||
3351 | flush_workqueue(wq); | ||
3352 | destroy_workqueue(wq); | ||
3353 | |||
3354 | mpt2sas_base_free_resources(ioc); | 3389 | mpt2sas_base_free_resources(ioc); |
3355 | _base_release_memory_pools(ioc); | 3390 | _base_release_memory_pools(ioc); |
3391 | pci_set_drvdata(ioc->pdev, NULL); | ||
3356 | kfree(ioc->pfacts); | 3392 | kfree(ioc->pfacts); |
3357 | kfree(ioc->ctl_cmds.reply); | 3393 | kfree(ioc->ctl_cmds.reply); |
3358 | kfree(ioc->base_cmds.reply); | 3394 | kfree(ioc->base_cmds.reply); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 286c185fa9e4..acdcff150a35 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -69,10 +69,10 @@ | |||
69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" |
71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
72 | #define MPT2SAS_DRIVER_VERSION "01.100.03.00" | 72 | #define MPT2SAS_DRIVER_VERSION "01.100.04.00" |
73 | #define MPT2SAS_MAJOR_VERSION 01 | 73 | #define MPT2SAS_MAJOR_VERSION 01 |
74 | #define MPT2SAS_MINOR_VERSION 100 | 74 | #define MPT2SAS_MINOR_VERSION 100 |
75 | #define MPT2SAS_BUILD_VERSION 03 | 75 | #define MPT2SAS_BUILD_VERSION 04 |
76 | #define MPT2SAS_RELEASE_VERSION 00 | 76 | #define MPT2SAS_RELEASE_VERSION 00 |
77 | 77 | ||
78 | /* | 78 | /* |
@@ -673,6 +673,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, | |||
673 | 673 | ||
674 | /* base shared API */ | 674 | /* base shared API */ |
675 | extern struct list_head mpt2sas_ioc_list; | 675 | extern struct list_head mpt2sas_ioc_list; |
676 | void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc); | ||
677 | void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc); | ||
676 | 678 | ||
677 | int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc); | 679 | int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc); |
678 | void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc); | 680 | void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 58cfb97846f7..6ddee161beb3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c | |||
@@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
236 | Mpi2ConfigRequest_t *config_request; | 236 | Mpi2ConfigRequest_t *config_request; |
237 | int r; | 237 | int r; |
238 | u8 retry_count; | 238 | u8 retry_count; |
239 | u8 issue_reset; | 239 | u8 issue_host_reset = 0; |
240 | u16 wait_state_count; | 240 | u16 wait_state_count; |
241 | 241 | ||
242 | mutex_lock(&ioc->config_cmds.mutex); | ||
242 | if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { | 243 | if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { |
243 | printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n", | 244 | printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n", |
244 | ioc->name, __func__); | 245 | ioc->name, __func__); |
246 | mutex_unlock(&ioc->config_cmds.mutex); | ||
245 | return -EAGAIN; | 247 | return -EAGAIN; |
246 | } | 248 | } |
247 | retry_count = 0; | 249 | retry_count = 0; |
248 | 250 | ||
249 | retry_config: | 251 | retry_config: |
252 | if (retry_count) { | ||
253 | if (retry_count > 2) /* attempt only 2 retries */ | ||
254 | return -EFAULT; | ||
255 | printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n", | ||
256 | ioc->name, __func__, retry_count); | ||
257 | } | ||
250 | wait_state_count = 0; | 258 | wait_state_count = 0; |
251 | ioc_state = mpt2sas_base_get_iocstate(ioc, 1); | 259 | ioc_state = mpt2sas_base_get_iocstate(ioc, 1); |
252 | while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { | 260 | while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { |
@@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
254 | printk(MPT2SAS_ERR_FMT | 262 | printk(MPT2SAS_ERR_FMT |
255 | "%s: failed due to ioc not operational\n", | 263 | "%s: failed due to ioc not operational\n", |
256 | ioc->name, __func__); | 264 | ioc->name, __func__); |
257 | ioc->config_cmds.status = MPT2_CMD_NOT_USED; | 265 | r = -EFAULT; |
258 | return -EFAULT; | 266 | goto out; |
259 | } | 267 | } |
260 | ssleep(1); | 268 | ssleep(1); |
261 | ioc_state = mpt2sas_base_get_iocstate(ioc, 1); | 269 | ioc_state = mpt2sas_base_get_iocstate(ioc, 1); |
@@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
271 | if (!smid) { | 279 | if (!smid) { |
272 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", | 280 | printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", |
273 | ioc->name, __func__); | 281 | ioc->name, __func__); |
274 | ioc->config_cmds.status = MPT2_CMD_NOT_USED; | 282 | r = -EAGAIN; |
275 | return -EAGAIN; | 283 | goto out; |
276 | } | 284 | } |
277 | 285 | ||
278 | r = 0; | 286 | r = 0; |
@@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
292 | ioc->name, __func__); | 300 | ioc->name, __func__); |
293 | _debug_dump_mf(mpi_request, | 301 | _debug_dump_mf(mpi_request, |
294 | sizeof(Mpi2ConfigRequest_t)/4); | 302 | sizeof(Mpi2ConfigRequest_t)/4); |
295 | if (!(ioc->config_cmds.status & MPT2_CMD_RESET)) | 303 | retry_count++; |
296 | issue_reset = 1; | 304 | if (ioc->config_cmds.smid == smid) |
297 | goto issue_host_reset; | 305 | mpt2sas_base_free_smid(ioc, smid); |
306 | if ((ioc->shost_recovery) || | ||
307 | (ioc->config_cmds.status & MPT2_CMD_RESET)) | ||
308 | goto retry_config; | ||
309 | issue_host_reset = 1; | ||
310 | r = -EFAULT; | ||
311 | goto out; | ||
298 | } | 312 | } |
299 | if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) | 313 | if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) |
300 | memcpy(mpi_reply, ioc->config_cmds.reply, | 314 | memcpy(mpi_reply, ioc->config_cmds.reply, |
@@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
302 | if (retry_count) | 316 | if (retry_count) |
303 | printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n", | 317 | printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n", |
304 | ioc->name, __func__); | 318 | ioc->name, __func__); |
319 | out: | ||
305 | ioc->config_cmds.status = MPT2_CMD_NOT_USED; | 320 | ioc->config_cmds.status = MPT2_CMD_NOT_USED; |
306 | return r; | 321 | mutex_unlock(&ioc->config_cmds.mutex); |
307 | 322 | if (issue_host_reset) | |
308 | issue_host_reset: | ||
309 | if (issue_reset) | ||
310 | mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 323 | mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
311 | FORCE_BIG_HAMMER); | 324 | FORCE_BIG_HAMMER); |
312 | ioc->config_cmds.status = MPT2_CMD_NOT_USED; | 325 | return r; |
313 | if (!retry_count) { | ||
314 | printk(MPT2SAS_INFO_FMT "%s: attempting retry\n", | ||
315 | ioc->name, __func__); | ||
316 | retry_count++; | ||
317 | goto retry_config; | ||
318 | } | ||
319 | return -EFAULT; | ||
320 | } | 326 | } |
321 | 327 | ||
322 | /** | 328 | /** |
@@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, | |||
375 | int r; | 381 | int r; |
376 | struct config_request mem; | 382 | struct config_request mem; |
377 | 383 | ||
378 | mutex_lock(&ioc->config_cmds.mutex); | ||
379 | memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t)); | 384 | memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t)); |
380 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 385 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
381 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 386 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, | |||
417 | _config_free_config_dma_memory(ioc, &mem); | 422 | _config_free_config_dma_memory(ioc, &mem); |
418 | 423 | ||
419 | out: | 424 | out: |
420 | mutex_unlock(&ioc->config_cmds.mutex); | ||
421 | return r; | 425 | return r; |
422 | } | 426 | } |
423 | 427 | ||
@@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, | |||
438 | int r; | 442 | int r; |
439 | struct config_request mem; | 443 | struct config_request mem; |
440 | 444 | ||
441 | mutex_lock(&ioc->config_cmds.mutex); | ||
442 | memset(config_page, 0, sizeof(Mpi2BiosPage2_t)); | 445 | memset(config_page, 0, sizeof(Mpi2BiosPage2_t)); |
443 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 446 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
444 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 447 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, | |||
480 | _config_free_config_dma_memory(ioc, &mem); | 483 | _config_free_config_dma_memory(ioc, &mem); |
481 | 484 | ||
482 | out: | 485 | out: |
483 | mutex_unlock(&ioc->config_cmds.mutex); | ||
484 | return r; | 486 | return r; |
485 | } | 487 | } |
486 | 488 | ||
@@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
501 | int r; | 503 | int r; |
502 | struct config_request mem; | 504 | struct config_request mem; |
503 | 505 | ||
504 | mutex_lock(&ioc->config_cmds.mutex); | ||
505 | memset(config_page, 0, sizeof(Mpi2BiosPage3_t)); | 506 | memset(config_page, 0, sizeof(Mpi2BiosPage3_t)); |
506 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 507 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
507 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 508 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
543 | _config_free_config_dma_memory(ioc, &mem); | 544 | _config_free_config_dma_memory(ioc, &mem); |
544 | 545 | ||
545 | out: | 546 | out: |
546 | mutex_unlock(&ioc->config_cmds.mutex); | ||
547 | return r; | 547 | return r; |
548 | } | 548 | } |
549 | 549 | ||
@@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, | |||
564 | int r; | 564 | int r; |
565 | struct config_request mem; | 565 | struct config_request mem; |
566 | 566 | ||
567 | mutex_lock(&ioc->config_cmds.mutex); | ||
568 | memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t)); | 567 | memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t)); |
569 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 568 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
570 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 569 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, | |||
606 | _config_free_config_dma_memory(ioc, &mem); | 605 | _config_free_config_dma_memory(ioc, &mem); |
607 | 606 | ||
608 | out: | 607 | out: |
609 | mutex_unlock(&ioc->config_cmds.mutex); | ||
610 | return r; | 608 | return r; |
611 | } | 609 | } |
612 | 610 | ||
@@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, | |||
627 | int r; | 625 | int r; |
628 | struct config_request mem; | 626 | struct config_request mem; |
629 | 627 | ||
630 | mutex_lock(&ioc->config_cmds.mutex); | ||
631 | memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t)); | 628 | memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t)); |
632 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 629 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
633 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 630 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, | |||
669 | _config_free_config_dma_memory(ioc, &mem); | 666 | _config_free_config_dma_memory(ioc, &mem); |
670 | 667 | ||
671 | out: | 668 | out: |
672 | mutex_unlock(&ioc->config_cmds.mutex); | ||
673 | return r; | 669 | return r; |
674 | } | 670 | } |
675 | 671 | ||
@@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, | |||
690 | int r; | 686 | int r; |
691 | struct config_request mem; | 687 | struct config_request mem; |
692 | 688 | ||
693 | mutex_lock(&ioc->config_cmds.mutex); | ||
694 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 689 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
695 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 690 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
696 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | 691 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; |
@@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, | |||
732 | _config_free_config_dma_memory(ioc, &mem); | 727 | _config_free_config_dma_memory(ioc, &mem); |
733 | 728 | ||
734 | out: | 729 | out: |
735 | mutex_unlock(&ioc->config_cmds.mutex); | ||
736 | return r; | 730 | return r; |
737 | } | 731 | } |
738 | 732 | ||
@@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, | |||
753 | int r; | 747 | int r; |
754 | struct config_request mem; | 748 | struct config_request mem; |
755 | 749 | ||
756 | mutex_lock(&ioc->config_cmds.mutex); | ||
757 | memset(config_page, 0, sizeof(Mpi2IOCPage8_t)); | 750 | memset(config_page, 0, sizeof(Mpi2IOCPage8_t)); |
758 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 751 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
759 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 752 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, | |||
795 | _config_free_config_dma_memory(ioc, &mem); | 788 | _config_free_config_dma_memory(ioc, &mem); |
796 | 789 | ||
797 | out: | 790 | out: |
798 | mutex_unlock(&ioc->config_cmds.mutex); | ||
799 | return r; | 791 | return r; |
800 | } | 792 | } |
801 | 793 | ||
@@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
818 | int r; | 810 | int r; |
819 | struct config_request mem; | 811 | struct config_request mem; |
820 | 812 | ||
821 | mutex_lock(&ioc->config_cmds.mutex); | ||
822 | memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t)); | 813 | memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t)); |
823 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 814 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
824 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 815 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
863 | _config_free_config_dma_memory(ioc, &mem); | 854 | _config_free_config_dma_memory(ioc, &mem); |
864 | 855 | ||
865 | out: | 856 | out: |
866 | mutex_unlock(&ioc->config_cmds.mutex); | ||
867 | return r; | 857 | return r; |
868 | } | 858 | } |
869 | 859 | ||
@@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
886 | int r; | 876 | int r; |
887 | struct config_request mem; | 877 | struct config_request mem; |
888 | 878 | ||
889 | mutex_lock(&ioc->config_cmds.mutex); | ||
890 | memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t)); | 879 | memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t)); |
891 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 880 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
892 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 881 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
931 | _config_free_config_dma_memory(ioc, &mem); | 920 | _config_free_config_dma_memory(ioc, &mem); |
932 | 921 | ||
933 | out: | 922 | out: |
934 | mutex_unlock(&ioc->config_cmds.mutex); | ||
935 | return r; | 923 | return r; |
936 | } | 924 | } |
937 | 925 | ||
@@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) | |||
953 | Mpi2ConfigReply_t mpi_reply; | 941 | Mpi2ConfigReply_t mpi_reply; |
954 | Mpi2SasIOUnitPage0_t config_page; | 942 | Mpi2SasIOUnitPage0_t config_page; |
955 | 943 | ||
956 | mutex_lock(&ioc->config_cmds.mutex); | ||
957 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 944 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
958 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 945 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
959 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | 946 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; |
@@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) | |||
1002 | _config_free_config_dma_memory(ioc, &mem); | 989 | _config_free_config_dma_memory(ioc, &mem); |
1003 | 990 | ||
1004 | out: | 991 | out: |
1005 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1006 | return r; | 992 | return r; |
1007 | } | 993 | } |
1008 | 994 | ||
@@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1026 | Mpi2ConfigRequest_t mpi_request; | 1012 | Mpi2ConfigRequest_t mpi_request; |
1027 | int r; | 1013 | int r; |
1028 | struct config_request mem; | 1014 | struct config_request mem; |
1029 | |||
1030 | mutex_lock(&ioc->config_cmds.mutex); | ||
1031 | memset(config_page, 0, sz); | 1015 | memset(config_page, 0, sz); |
1032 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1016 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1033 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1017 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1070 | _config_free_config_dma_memory(ioc, &mem); | 1054 | _config_free_config_dma_memory(ioc, &mem); |
1071 | 1055 | ||
1072 | out: | 1056 | out: |
1073 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1074 | return r; | 1057 | return r; |
1075 | } | 1058 | } |
1076 | 1059 | ||
@@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1095 | int r; | 1078 | int r; |
1096 | struct config_request mem; | 1079 | struct config_request mem; |
1097 | 1080 | ||
1098 | mutex_lock(&ioc->config_cmds.mutex); | ||
1099 | memset(config_page, 0, sz); | 1081 | memset(config_page, 0, sz); |
1100 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1082 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1101 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1083 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1138 | _config_free_config_dma_memory(ioc, &mem); | 1120 | _config_free_config_dma_memory(ioc, &mem); |
1139 | 1121 | ||
1140 | out: | 1122 | out: |
1141 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1142 | return r; | 1123 | return r; |
1143 | } | 1124 | } |
1144 | 1125 | ||
@@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1161 | int r; | 1142 | int r; |
1162 | struct config_request mem; | 1143 | struct config_request mem; |
1163 | 1144 | ||
1164 | mutex_lock(&ioc->config_cmds.mutex); | ||
1165 | memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t)); | 1145 | memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t)); |
1166 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1146 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1167 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1147 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1206,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1206 | _config_free_config_dma_memory(ioc, &mem); | 1186 | _config_free_config_dma_memory(ioc, &mem); |
1207 | 1187 | ||
1208 | out: | 1188 | out: |
1209 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1210 | return r; | 1189 | return r; |
1211 | } | 1190 | } |
1212 | 1191 | ||
@@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1230 | int r; | 1209 | int r; |
1231 | struct config_request mem; | 1210 | struct config_request mem; |
1232 | 1211 | ||
1233 | mutex_lock(&ioc->config_cmds.mutex); | ||
1234 | memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t)); | 1212 | memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t)); |
1235 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1213 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1236 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1214 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1277 | _config_free_config_dma_memory(ioc, &mem); | 1255 | _config_free_config_dma_memory(ioc, &mem); |
1278 | 1256 | ||
1279 | out: | 1257 | out: |
1280 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1281 | return r; | 1258 | return r; |
1282 | } | 1259 | } |
1283 | 1260 | ||
@@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1300 | int r; | 1277 | int r; |
1301 | struct config_request mem; | 1278 | struct config_request mem; |
1302 | 1279 | ||
1303 | mutex_lock(&ioc->config_cmds.mutex); | ||
1304 | memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t)); | 1280 | memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t)); |
1305 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1281 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1306 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1282 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1345 | _config_free_config_dma_memory(ioc, &mem); | 1321 | _config_free_config_dma_memory(ioc, &mem); |
1346 | 1322 | ||
1347 | out: | 1323 | out: |
1348 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1349 | return r; | 1324 | return r; |
1350 | } | 1325 | } |
1351 | 1326 | ||
@@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1367 | int r; | 1342 | int r; |
1368 | struct config_request mem; | 1343 | struct config_request mem; |
1369 | 1344 | ||
1370 | mutex_lock(&ioc->config_cmds.mutex); | ||
1371 | memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t)); | 1345 | memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t)); |
1372 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1346 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1373 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1347 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1413 | _config_free_config_dma_memory(ioc, &mem); | 1387 | _config_free_config_dma_memory(ioc, &mem); |
1414 | 1388 | ||
1415 | out: | 1389 | out: |
1416 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1417 | return r; | 1390 | return r; |
1418 | } | 1391 | } |
1419 | 1392 | ||
@@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1435 | int r; | 1408 | int r; |
1436 | struct config_request mem; | 1409 | struct config_request mem; |
1437 | 1410 | ||
1438 | mutex_lock(&ioc->config_cmds.mutex); | ||
1439 | memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t)); | 1411 | memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t)); |
1440 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1412 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1441 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1413 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1481 | _config_free_config_dma_memory(ioc, &mem); | 1453 | _config_free_config_dma_memory(ioc, &mem); |
1482 | 1454 | ||
1483 | out: | 1455 | out: |
1484 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1485 | return r; | 1456 | return r; |
1486 | } | 1457 | } |
1487 | 1458 | ||
@@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, | |||
1505 | int r; | 1476 | int r; |
1506 | struct config_request mem; | 1477 | struct config_request mem; |
1507 | 1478 | ||
1508 | mutex_lock(&ioc->config_cmds.mutex); | ||
1509 | memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t)); | 1479 | memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t)); |
1510 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1480 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1511 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1481 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, | |||
1548 | _config_free_config_dma_memory(ioc, &mem); | 1518 | _config_free_config_dma_memory(ioc, &mem); |
1549 | 1519 | ||
1550 | out: | 1520 | out: |
1551 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1552 | return r; | 1521 | return r; |
1553 | } | 1522 | } |
1554 | 1523 | ||
@@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
1572 | struct config_request mem; | 1541 | struct config_request mem; |
1573 | u16 ioc_status; | 1542 | u16 ioc_status; |
1574 | 1543 | ||
1575 | mutex_lock(&ioc->config_cmds.mutex); | ||
1576 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1544 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1577 | *num_pds = 0; | 1545 | *num_pds = 0; |
1578 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1546 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
1620 | _config_free_config_dma_memory(ioc, &mem); | 1588 | _config_free_config_dma_memory(ioc, &mem); |
1621 | 1589 | ||
1622 | out: | 1590 | out: |
1623 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1624 | return r; | 1591 | return r; |
1625 | } | 1592 | } |
1626 | 1593 | ||
@@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, | |||
1645 | int r; | 1612 | int r; |
1646 | struct config_request mem; | 1613 | struct config_request mem; |
1647 | 1614 | ||
1648 | mutex_lock(&ioc->config_cmds.mutex); | ||
1649 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1615 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1650 | memset(config_page, 0, sz); | 1616 | memset(config_page, 0, sz); |
1651 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1617 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1687,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, | |||
1687 | _config_free_config_dma_memory(ioc, &mem); | 1653 | _config_free_config_dma_memory(ioc, &mem); |
1688 | 1654 | ||
1689 | out: | 1655 | out: |
1690 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1691 | return r; | 1656 | return r; |
1692 | } | 1657 | } |
1693 | 1658 | ||
@@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1711 | int r; | 1676 | int r; |
1712 | struct config_request mem; | 1677 | struct config_request mem; |
1713 | 1678 | ||
1714 | mutex_lock(&ioc->config_cmds.mutex); | ||
1715 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1679 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1716 | memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t)); | 1680 | memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t)); |
1717 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1681 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1754 | _config_free_config_dma_memory(ioc, &mem); | 1718 | _config_free_config_dma_memory(ioc, &mem); |
1755 | 1719 | ||
1756 | out: | 1720 | out: |
1757 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1758 | return r; | 1721 | return r; |
1759 | } | 1722 | } |
1760 | 1723 | ||
@@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, | |||
1778 | struct config_request mem; | 1741 | struct config_request mem; |
1779 | u16 ioc_status; | 1742 | u16 ioc_status; |
1780 | 1743 | ||
1781 | mutex_lock(&ioc->config_cmds.mutex); | ||
1782 | *volume_handle = 0; | 1744 | *volume_handle = 0; |
1783 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | 1745 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); |
1784 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | 1746 | mpi_request.Function = MPI2_FUNCTION_CONFIG; |
@@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, | |||
1842 | _config_free_config_dma_memory(ioc, &mem); | 1804 | _config_free_config_dma_memory(ioc, &mem); |
1843 | 1805 | ||
1844 | out: | 1806 | out: |
1845 | mutex_unlock(&ioc->config_cmds.mutex); | ||
1846 | return r; | 1807 | return r; |
1847 | } | 1808 | } |
1848 | 1809 | ||
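Note: the hunks above remove the per-accessor mutex_lock()/mutex_unlock() pairs from every mpt2sas_config_get_*/set_* wrapper, which implies the serialization moves into a single common request path. A minimal userspace analogue of that pattern, written as a hedged sketch (struct and function names here are illustrative, not the driver's actual internals):

	#include <pthread.h>
	#include <string.h>

	struct adapter {
		pthread_mutex_t config_mutex;
		char last_page[64];
	};

	static int issue_config_request(struct adapter *ioc, const char *page)
	{
		/* single choke point: every page accessor funnels through here */
		pthread_mutex_lock(&ioc->config_mutex);
		strncpy(ioc->last_page, page, sizeof(ioc->last_page) - 1);
		pthread_mutex_unlock(&ioc->config_mutex);
		return 0;
	}

	static int get_iounit_pg1(struct adapter *ioc)
	{
		/* the wrapper itself no longer takes the lock */
		return issue_config_request(ioc, "IOUnitPage1");
	}

	int main(void)
	{
		struct adapter ioc = { .config_mutex = PTHREAD_MUTEX_INITIALIZER };
		return get_iounit_pg1(&ioc);
	}

The design point is that a caller can no longer deadlock or double-lock by nesting page accessors, since only the common helper ever touches the mutex.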
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 2a01a5f2a84d..2e9a4445596f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -2767,6 +2767,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | |||
2767 | char *desc_ioc_state = NULL; | 2767 | char *desc_ioc_state = NULL; |
2768 | char *desc_scsi_status = NULL; | 2768 | char *desc_scsi_status = NULL; |
2769 | char *desc_scsi_state = ioc->tmp_string; | 2769 | char *desc_scsi_state = ioc->tmp_string; |
2770 | u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); | ||
2771 | |||
2772 | if (log_info == 0x31170000) | ||
2773 | return; | ||
2770 | 2774 | ||
2771 | switch (ioc_status) { | 2775 | switch (ioc_status) { |
2772 | case MPI2_IOCSTATUS_SUCCESS: | 2776 | case MPI2_IOCSTATUS_SUCCESS: |
@@ -3426,7 +3430,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
3426 | __le64 sas_address; | 3430 | __le64 sas_address; |
3427 | int i; | 3431 | int i; |
3428 | unsigned long flags; | 3432 | unsigned long flags; |
3429 | struct _sas_port *mpt2sas_port; | 3433 | struct _sas_port *mpt2sas_port = NULL; |
3430 | int rc = 0; | 3434 | int rc = 0; |
3431 | 3435 | ||
3432 | if (!handle) | 3436 | if (!handle) |
@@ -3518,12 +3522,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
3518 | &expander_pg1, i, handle))) { | 3522 | &expander_pg1, i, handle))) { |
3519 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 3523 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", |
3520 | ioc->name, __FILE__, __LINE__, __func__); | 3524 | ioc->name, __FILE__, __LINE__, __func__); |
3521 | continue; | 3525 | rc = -1; |
3526 | goto out_fail; | ||
3522 | } | 3527 | } |
3523 | sas_expander->phy[i].handle = handle; | 3528 | sas_expander->phy[i].handle = handle; |
3524 | sas_expander->phy[i].phy_id = i; | 3529 | sas_expander->phy[i].phy_id = i; |
3525 | mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], | 3530 | |
3526 | expander_pg1, sas_expander->parent_dev); | 3531 | if ((mpt2sas_transport_add_expander_phy(ioc, |
3532 | &sas_expander->phy[i], expander_pg1, | ||
3533 | sas_expander->parent_dev))) { | ||
3534 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
3535 | ioc->name, __FILE__, __LINE__, __func__); | ||
3536 | rc = -1; | ||
3537 | goto out_fail; | ||
3538 | } | ||
3527 | } | 3539 | } |
3528 | 3540 | ||
3529 | if (sas_expander->enclosure_handle) { | 3541 | if (sas_expander->enclosure_handle) { |
@@ -3540,8 +3552,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
3540 | 3552 | ||
3541 | out_fail: | 3553 | out_fail: |
3542 | 3554 | ||
3543 | if (sas_expander) | 3555 | if (mpt2sas_port) |
3544 | kfree(sas_expander->phy); | 3556 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, |
3557 | sas_expander->parent_handle); | ||
3545 | kfree(sas_expander); | 3558 | kfree(sas_expander); |
3546 | return rc; | 3559 | return rc; |
3547 | } | 3560 | } |
@@ -3663,12 +3676,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) | |||
3663 | sas_device->hidden_raid_component = is_pd; | 3676 | sas_device->hidden_raid_component = is_pd; |
3664 | 3677 | ||
3665 | /* get enclosure_logical_id */ | 3678 | /* get enclosure_logical_id */ |
3666 | if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, | 3679 | if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0( |
3667 | MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, | 3680 | ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, |
3668 | sas_device->enclosure_handle))) { | 3681 | sas_device->enclosure_handle))) |
3669 | sas_device->enclosure_logical_id = | 3682 | sas_device->enclosure_logical_id = |
3670 | le64_to_cpu(enclosure_pg0.EnclosureLogicalID); | 3683 | le64_to_cpu(enclosure_pg0.EnclosureLogicalID); |
3671 | } | ||
3672 | 3684 | ||
3673 | /* get device name */ | 3685 | /* get device name */ |
3674 | sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); | 3686 | sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); |
@@ -4250,12 +4262,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc, | |||
4250 | u16 handle = le16_to_cpu(element->VolDevHandle); | 4262 | u16 handle = le16_to_cpu(element->VolDevHandle); |
4251 | int rc; | 4263 | int rc; |
4252 | 4264 | ||
4253 | #if 0 /* RAID_HACKS */ | ||
4254 | if (le32_to_cpu(event_data->Flags) & | ||
4255 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) | ||
4256 | return; | ||
4257 | #endif | ||
4258 | |||
4259 | mpt2sas_config_get_volume_wwid(ioc, handle, &wwid); | 4265 | mpt2sas_config_get_volume_wwid(ioc, handle, &wwid); |
4260 | if (!wwid) { | 4266 | if (!wwid) { |
4261 | printk(MPT2SAS_ERR_FMT | 4267 | printk(MPT2SAS_ERR_FMT |
@@ -4310,12 +4316,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, | |||
4310 | unsigned long flags; | 4316 | unsigned long flags; |
4311 | struct MPT2SAS_TARGET *sas_target_priv_data; | 4317 | struct MPT2SAS_TARGET *sas_target_priv_data; |
4312 | 4318 | ||
4313 | #if 0 /* RAID_HACKS */ | ||
4314 | if (le32_to_cpu(event_data->Flags) & | ||
4315 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) | ||
4316 | return; | ||
4317 | #endif | ||
4318 | |||
4319 | spin_lock_irqsave(&ioc->raid_device_lock, flags); | 4319 | spin_lock_irqsave(&ioc->raid_device_lock, flags); |
4320 | raid_device = _scsih_raid_device_find_by_handle(ioc, handle); | 4320 | raid_device = _scsih_raid_device_find_by_handle(ioc, handle); |
4321 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | 4321 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
@@ -4428,14 +4428,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, | |||
4428 | struct _sas_device *sas_device; | 4428 | struct _sas_device *sas_device; |
4429 | unsigned long flags; | 4429 | unsigned long flags; |
4430 | u16 handle = le16_to_cpu(element->PhysDiskDevHandle); | 4430 | u16 handle = le16_to_cpu(element->PhysDiskDevHandle); |
4431 | Mpi2ConfigReply_t mpi_reply; | ||
4432 | Mpi2SasDevicePage0_t sas_device_pg0; | ||
4433 | u32 ioc_status; | ||
4431 | 4434 | ||
4432 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 4435 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
4433 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | 4436 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); |
4434 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 4437 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
4435 | if (sas_device) | 4438 | if (sas_device) { |
4436 | sas_device->hidden_raid_component = 1; | 4439 | sas_device->hidden_raid_component = 1; |
4437 | else | 4440 | return; |
4438 | _scsih_add_device(ioc, handle, 0, 1); | 4441 | } |
4442 | |||
4443 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, | ||
4444 | MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { | ||
4445 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
4446 | ioc->name, __FILE__, __LINE__, __func__); | ||
4447 | return; | ||
4448 | } | ||
4449 | |||
4450 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
4451 | MPI2_IOCSTATUS_MASK; | ||
4452 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | ||
4453 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
4454 | ioc->name, __FILE__, __LINE__, __func__); | ||
4455 | return; | ||
4456 | } | ||
4457 | |||
4458 | _scsih_link_change(ioc, | ||
4459 | le16_to_cpu(sas_device_pg0.ParentDevHandle), | ||
4460 | handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | ||
4461 | |||
4462 | _scsih_add_device(ioc, handle, 0, 1); | ||
4439 | } | 4463 | } |
4440 | 4464 | ||
4441 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4465 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
@@ -4535,12 +4559,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, | |||
4535 | { | 4559 | { |
4536 | Mpi2EventIrConfigElement_t *element; | 4560 | Mpi2EventIrConfigElement_t *element; |
4537 | int i; | 4561 | int i; |
4562 | u8 foreign_config; | ||
4538 | 4563 | ||
4539 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4564 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
4540 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 4565 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
4541 | _scsih_sas_ir_config_change_event_debug(ioc, event_data); | 4566 | _scsih_sas_ir_config_change_event_debug(ioc, event_data); |
4542 | 4567 | ||
4543 | #endif | 4568 | #endif |
4569 | foreign_config = (le32_to_cpu(event_data->Flags) & | ||
4570 | MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; | ||
4544 | 4571 | ||
4545 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; | 4572 | element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; |
4546 | for (i = 0; i < event_data->NumElements; i++, element++) { | 4573 | for (i = 0; i < event_data->NumElements; i++, element++) { |
@@ -4548,11 +4575,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, | |||
4548 | switch (element->ReasonCode) { | 4575 | switch (element->ReasonCode) { |
4549 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: | 4576 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: |
4550 | case MPI2_EVENT_IR_CHANGE_RC_ADDED: | 4577 | case MPI2_EVENT_IR_CHANGE_RC_ADDED: |
4551 | _scsih_sas_volume_add(ioc, element); | 4578 | if (!foreign_config) |
4579 | _scsih_sas_volume_add(ioc, element); | ||
4552 | break; | 4580 | break; |
4553 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: | 4581 | case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: |
4554 | case MPI2_EVENT_IR_CHANGE_RC_REMOVED: | 4582 | case MPI2_EVENT_IR_CHANGE_RC_REMOVED: |
4555 | _scsih_sas_volume_delete(ioc, element); | 4583 | if (!foreign_config) |
4584 | _scsih_sas_volume_delete(ioc, element); | ||
4556 | break; | 4585 | break; |
4557 | case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: | 4586 | case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: |
4558 | _scsih_sas_pd_hide(ioc, element); | 4587 | _scsih_sas_pd_hide(ioc, element); |
@@ -4671,6 +4700,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, | |||
4671 | u32 state; | 4700 | u32 state; |
4672 | struct _sas_device *sas_device; | 4701 | struct _sas_device *sas_device; |
4673 | unsigned long flags; | 4702 | unsigned long flags; |
4703 | Mpi2ConfigReply_t mpi_reply; | ||
4704 | Mpi2SasDevicePage0_t sas_device_pg0; | ||
4705 | u32 ioc_status; | ||
4674 | 4706 | ||
4675 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) | 4707 | if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) |
4676 | return; | 4708 | return; |
@@ -4687,22 +4719,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, | |||
4687 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | 4719 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); |
4688 | 4720 | ||
4689 | switch (state) { | 4721 | switch (state) { |
4690 | #if 0 | ||
4691 | case MPI2_RAID_PD_STATE_OFFLINE: | ||
4692 | if (sas_device) | ||
4693 | _scsih_remove_device(ioc, handle); | ||
4694 | break; | ||
4695 | #endif | ||
4696 | case MPI2_RAID_PD_STATE_ONLINE: | 4722 | case MPI2_RAID_PD_STATE_ONLINE: |
4697 | case MPI2_RAID_PD_STATE_DEGRADED: | 4723 | case MPI2_RAID_PD_STATE_DEGRADED: |
4698 | case MPI2_RAID_PD_STATE_REBUILDING: | 4724 | case MPI2_RAID_PD_STATE_REBUILDING: |
4699 | case MPI2_RAID_PD_STATE_OPTIMAL: | 4725 | case MPI2_RAID_PD_STATE_OPTIMAL: |
4700 | if (sas_device) | 4726 | if (sas_device) { |
4701 | sas_device->hidden_raid_component = 1; | 4727 | sas_device->hidden_raid_component = 1; |
4702 | else | 4728 | return; |
4703 | _scsih_add_device(ioc, handle, 0, 1); | 4729 | } |
4730 | |||
4731 | if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, | ||
4732 | &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, | ||
4733 | handle))) { | ||
4734 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
4735 | ioc->name, __FILE__, __LINE__, __func__); | ||
4736 | return; | ||
4737 | } | ||
4738 | |||
4739 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
4740 | MPI2_IOCSTATUS_MASK; | ||
4741 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | ||
4742 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
4743 | ioc->name, __FILE__, __LINE__, __func__); | ||
4744 | return; | ||
4745 | } | ||
4746 | |||
4747 | _scsih_link_change(ioc, | ||
4748 | le16_to_cpu(sas_device_pg0.ParentDevHandle), | ||
4749 | handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); | ||
4750 | |||
4751 | _scsih_add_device(ioc, handle, 0, 1); | ||
4752 | |||
4704 | break; | 4753 | break; |
4705 | 4754 | ||
4755 | case MPI2_RAID_PD_STATE_OFFLINE: | ||
4706 | case MPI2_RAID_PD_STATE_NOT_CONFIGURED: | 4756 | case MPI2_RAID_PD_STATE_NOT_CONFIGURED: |
4707 | case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: | 4757 | case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: |
4708 | case MPI2_RAID_PD_STATE_HOT_SPARE: | 4758 | case MPI2_RAID_PD_STATE_HOT_SPARE: |
@@ -5774,6 +5824,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5774 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | 5824 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); |
5775 | u32 device_state; | 5825 | u32 device_state; |
5776 | 5826 | ||
5827 | mpt2sas_base_stop_watchdog(ioc); | ||
5777 | flush_scheduled_work(); | 5828 | flush_scheduled_work(); |
5778 | scsi_block_requests(shost); | 5829 | scsi_block_requests(shost); |
5779 | device_state = pci_choose_state(pdev, state); | 5830 | device_state = pci_choose_state(pdev, state); |
@@ -5816,6 +5867,7 @@ _scsih_resume(struct pci_dev *pdev) | |||
5816 | 5867 | ||
5817 | mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); | 5868 | mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); |
5818 | scsi_unblock_requests(shost); | 5869 | scsi_unblock_requests(shost); |
5870 | mpt2sas_base_start_watchdog(ioc); | ||
5819 | return 0; | 5871 | return 0; |
5820 | } | 5872 | } |
5821 | #endif /* CONFIG_PM */ | 5873 | #endif /* CONFIG_PM */ |
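Note: the IR config-change hunk above computes a foreign_config flag once from event_data->Flags and uses it to gate volume add/delete, replacing the removed "#if 0 /* RAID_HACKS */" blocks. A hedged, self-contained sketch of that gating; the flag value, struct layout, and helpers are illustrative stand-ins, not the MPI2 definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define IR_CHANGE_FLAGS_FOREIGN_CONFIG 0x01	/* illustrative value */

	struct ir_event {
		uint32_t flags;
		int reason_code;
	};

	enum { RC_VOLUME_CREATED, RC_VOLUME_DELETED };

	static void volume_add(void)    { puts("add volume"); }
	static void volume_delete(void) { puts("delete volume"); }

	static void handle_ir_event(const struct ir_event *ev)
	{
		/* evaluate the foreign-config bit once, then gate per reason code */
		uint8_t foreign = (ev->flags & IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

		switch (ev->reason_code) {
		case RC_VOLUME_CREATED:
			if (!foreign)
				volume_add();
			break;
		case RC_VOLUME_DELETED:
			if (!foreign)
				volume_delete();
			break;
		default:
			break;	/* physical-disk events are not gated */
		}
	}

	int main(void)
	{
		struct ir_event ev = { .flags = IR_CHANGE_FLAGS_FOREIGN_CONFIG,
				       .reason_code = RC_VOLUME_CREATED };
		handle_ir_event(&ev);	/* foreign config: volume add is skipped */
		return 0;
	}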
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 037c1e0b7c4c..6553833c12db 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -527,7 +527,7 @@ config SERIAL_S3C24A0 | |||
527 | 527 | ||
528 | config SERIAL_S3C6400 | 528 | config SERIAL_S3C6400 |
529 | tristate "Samsung S3C6400/S3C6410 Serial port support" | 529 | tristate "Samsung S3C6400/S3C6410 Serial port support" |
530 | depends on SERIAL_SAMSUNG && (CPU_S3C600 || CPU_S3C6410) | 530 | depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410) |
531 | default y | 531 | default y |
532 | help | 532 | help |
533 | Serial port support for the Samsung S3C6400 and S3C6410 | 533 | Serial port support for the Samsung S3C6400 and S3C6410 |
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index e0d44af4745a..3f3119d760db 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c | |||
@@ -111,29 +111,32 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi, | |||
111 | unsigned int bpw; | 111 | unsigned int bpw; |
112 | unsigned int hz; | 112 | unsigned int hz; |
113 | unsigned int div; | 113 | unsigned int div; |
114 | unsigned long clk; | ||
114 | 115 | ||
115 | bpw = t ? t->bits_per_word : spi->bits_per_word; | 116 | bpw = t ? t->bits_per_word : spi->bits_per_word; |
116 | hz = t ? t->speed_hz : spi->max_speed_hz; | 117 | hz = t ? t->speed_hz : spi->max_speed_hz; |
117 | 118 | ||
119 | if (!bpw) | ||
120 | bpw = 8; | ||
121 | |||
122 | if (!hz) | ||
123 | hz = spi->max_speed_hz; | ||
124 | |||
118 | if (bpw != 8) { | 125 | if (bpw != 8) { |
119 | dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); | 126 | dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); |
120 | return -EINVAL; | 127 | return -EINVAL; |
121 | } | 128 | } |
122 | 129 | ||
123 | div = clk_get_rate(hw->clk) / hz; | 130 | clk = clk_get_rate(hw->clk); |
124 | 131 | div = DIV_ROUND_UP(clk, hz * 2) - 1; | |
125 | /* is clk = pclk / (2 * (pre+1)), or is it | ||
126 | * clk = (pclk * 2) / ( pre + 1) */ | ||
127 | |||
128 | div /= 2; | ||
129 | |||
130 | if (div > 0) | ||
131 | div -= 1; | ||
132 | 132 | ||
133 | if (div > 255) | 133 | if (div > 255) |
134 | div = 255; | 134 | div = 255; |
135 | 135 | ||
136 | dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz); | 136 | dev_dbg(&spi->dev, "setting pre-scaler to %d (wanted %d, got %ld)\n", |
137 | div, hz, clk / (2 * (div + 1))); | ||
138 | |||
139 | |||
137 | writeb(div, hw->regs + S3C2410_SPPRE); | 140 | writeb(div, hw->regs + S3C2410_SPPRE); |
138 | 141 | ||
139 | spin_lock(&hw->bitbang.lock); | 142 | spin_lock(&hw->bitbang.lock); |
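Note: the spi_s3c24xx change above replaces the divide-then-decrement sequence with DIV_ROUND_UP(clk, hz * 2) - 1, so the programmed rate never exceeds the requested one. A small standalone check of that arithmetic, with DIV_ROUND_UP re-declared for a userspace build and example clock values chosen only for illustration:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long pclk = 66000000;	/* example bus clock, Hz */
		unsigned int hz = 400000;	/* requested SPI rate, Hz */

		/* prescaler: resulting rate is pclk / (2 * (div + 1)) */
		unsigned int div = DIV_ROUND_UP(pclk, hz * 2) - 1;

		if (div > 255)
			div = 255;	/* register field is 8 bits wide */

		printf("pre-scaler %u -> actual %lu Hz (wanted %u Hz)\n",
		       div, pclk / (2UL * (div + 1)), hz);
		return 0;
	}

With these numbers the rounded-up divisor gives roughly 397.6 kHz, just below the 400 kHz request, whereas plain truncation could have overshot it.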
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 9d7c99394ec6..640f65c6ef84 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c | |||
@@ -1752,12 +1752,12 @@ static int comedi_open(struct inode *inode, struct file *file) | |||
1752 | mutex_lock(&dev->mutex); | 1752 | mutex_lock(&dev->mutex); |
1753 | if (dev->attached) | 1753 | if (dev->attached) |
1754 | goto ok; | 1754 | goto ok; |
1755 | if (!capable(CAP_SYS_MODULE) && dev->in_request_module) { | 1755 | if (!capable(CAP_NET_ADMIN) && dev->in_request_module) { |
1756 | DPRINTK("in request module\n"); | 1756 | DPRINTK("in request module\n"); |
1757 | mutex_unlock(&dev->mutex); | 1757 | mutex_unlock(&dev->mutex); |
1758 | return -ENODEV; | 1758 | return -ENODEV; |
1759 | } | 1759 | } |
1760 | if (capable(CAP_SYS_MODULE) && dev->in_request_module) | 1760 | if (capable(CAP_NET_ADMIN) && dev->in_request_module) |
1761 | goto ok; | 1761 | goto ok; |
1762 | 1762 | ||
1763 | dev->in_request_module = 1; | 1763 | dev->in_request_module = 1; |
@@ -1770,8 +1770,8 @@ static int comedi_open(struct inode *inode, struct file *file) | |||
1770 | 1770 | ||
1771 | dev->in_request_module = 0; | 1771 | dev->in_request_module = 0; |
1772 | 1772 | ||
1773 | if (!dev->attached && !capable(CAP_SYS_MODULE)) { | 1773 | if (!dev->attached && !capable(CAP_NET_ADMIN)) { |
1774 | DPRINTK("not attached and not CAP_SYS_MODULE\n"); | 1774 | DPRINTK("not attached and not CAP_NET_ADMIN\n"); |
1775 | mutex_unlock(&dev->mutex); | 1775 | mutex_unlock(&dev->mutex); |
1776 | return -ENODEV; | 1776 | return -ENODEV; |
1777 | } | 1777 | } |
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 7b605795b770..e63c9bea6c54 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c | |||
@@ -1950,14 +1950,7 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type, | |||
1950 | */ | 1950 | */ |
1951 | static void pohmelfs_kill_super(struct super_block *sb) | 1951 | static void pohmelfs_kill_super(struct super_block *sb) |
1952 | { | 1952 | { |
1953 | struct writeback_control wbc = { | 1953 | sync_inodes_sb(sb); |
1954 | .sync_mode = WB_SYNC_ALL, | ||
1955 | .range_start = 0, | ||
1956 | .range_end = LLONG_MAX, | ||
1957 | .nr_to_write = LONG_MAX, | ||
1958 | }; | ||
1959 | generic_sync_sb_inodes(sb, &wbc); | ||
1960 | |||
1961 | kill_anon_super(sb); | 1954 | kill_anon_super(sb); |
1962 | } | 1955 | } |
1963 | 1956 | ||
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 0a69672097a8..4e83c297ec9e 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
@@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) | |||
953 | 953 | ||
954 | mutex_lock(&tz->lock); | 954 | mutex_lock(&tz->lock); |
955 | 955 | ||
956 | tz->ops->get_temp(tz, &temp); | 956 | if (tz->ops->get_temp(tz, &temp)) { |
957 | /* get_temp failed - retry it later */ | ||
958 | printk(KERN_WARNING PREFIX "failed to read out thermal zone " | ||
959 | "%d\n", tz->id); | ||
960 | goto leave; | ||
961 | } | ||
957 | 962 | ||
958 | for (count = 0; count < tz->trips; count++) { | 963 | for (count = 0; count < tz->trips; count++) { |
959 | tz->ops->get_trip_type(tz, count, &trip_type); | 964 | tz->ops->get_trip_type(tz, count, &trip_type); |
@@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) | |||
1005 | THERMAL_TRIPS_NONE); | 1010 | THERMAL_TRIPS_NONE); |
1006 | 1011 | ||
1007 | tz->last_temperature = temp; | 1012 | tz->last_temperature = temp; |
1013 | |||
1014 | leave: | ||
1008 | if (tz->passive) | 1015 | if (tz->passive) |
1009 | thermal_zone_device_set_polling(tz, tz->passive_delay); | 1016 | thermal_zone_device_set_polling(tz, tz->passive_delay); |
1010 | else if (tz->polling_delay) | 1017 | else if (tz->polling_delay) |
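Note: the thermal_sys hunk above stops trusting get_temp() unconditionally; a non-zero return now logs a warning and skips trip evaluation while still rescheduling the poll. A hedged sketch of that control flow with stub types and a deliberately failing sensor callback (none of these names are the kernel's thermal_zone_device API):

	#include <stdio.h>

	struct zone {
		int id;
		int (*get_temp)(struct zone *z, long *temp);
		int polling_delay_ms;
	};

	static void schedule_poll(struct zone *z, int ms)
	{
		printf("zone %d: next poll in %d ms\n", z->id, ms);
	}

	static void zone_update(struct zone *z)
	{
		long temp = 0;

		if (z->get_temp(z, &temp)) {
			/* read failed: warn, skip trip handling, keep polling */
			fprintf(stderr, "failed to read out thermal zone %d\n", z->id);
			goto leave;
		}

		printf("zone %d: %ld millidegrees C\n", z->id, temp);
		/* ... trip-point evaluation would go here ... */

	leave:
		schedule_poll(z, z->polling_delay_ms);
	}

	static int broken_sensor(struct zone *z, long *temp)
	{
		(void)z; (void)temp;
		return -1;	/* simulate a failed sensor read */
	}

	int main(void)
	{
		struct zone z = { .id = 0, .get_temp = broken_sensor,
				  .polling_delay_ms = 1000 };
		zone_update(&z);
		return 0;
	}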
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 8f24564f77b0..07f22b625632 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -481,6 +481,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
481 | /* tell the board code to enable the panel */ | 481 | /* tell the board code to enable the panel */ |
482 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { | 482 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { |
483 | ch = &priv->ch[k]; | 483 | ch = &priv->ch[k]; |
484 | if (!ch->enabled) | ||
485 | continue; | ||
486 | |||
484 | board_cfg = &ch->cfg.board_cfg; | 487 | board_cfg = &ch->cfg.board_cfg; |
485 | if (board_cfg->display_on) | 488 | if (board_cfg->display_on) |
486 | board_cfg->display_on(board_cfg->board_data); | 489 | board_cfg->display_on(board_cfg->board_data); |
@@ -498,6 +501,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) | |||
498 | /* clean up deferred io and ask board code to disable panel */ | 501 | /* clean up deferred io and ask board code to disable panel */ |
499 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { | 502 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { |
500 | ch = &priv->ch[k]; | 503 | ch = &priv->ch[k]; |
504 | if (!ch->enabled) | ||
505 | continue; | ||
501 | 506 | ||
502 | /* deferred io mode: | 507 | /* deferred io mode: |
503 | * flush frame, and wait for frame end interrupt | 508 | * flush frame, and wait for frame end interrupt |
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c index 15502d5e3641..54cd91610174 100644 --- a/drivers/video/xen-fbfront.c +++ b/drivers/video/xen-fbfront.c | |||
@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev, | |||
454 | 454 | ||
455 | xenfb_init_shared_page(info, fb_info); | 455 | xenfb_init_shared_page(info, fb_info); |
456 | 456 | ||
457 | ret = xenfb_connect_backend(dev, info); | ||
458 | if (ret < 0) | ||
459 | goto error; | ||
460 | |||
457 | ret = register_framebuffer(fb_info); | 461 | ret = register_framebuffer(fb_info); |
458 | if (ret) { | 462 | if (ret) { |
459 | fb_deferred_io_cleanup(fb_info); | 463 | fb_deferred_io_cleanup(fb_info); |
@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev, | |||
464 | } | 468 | } |
465 | info->fb_info = fb_info; | 469 | info->fb_info = fb_info; |
466 | 470 | ||
467 | ret = xenfb_connect_backend(dev, info); | ||
468 | if (ret < 0) | ||
469 | goto error; | ||
470 | |||
471 | xenfb_make_preferred_console(); | 471 | xenfb_make_preferred_console(); |
472 | return 0; | 472 | return 0; |
473 | 473 | ||
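Note: the xen-fbfront hunk above moves xenfb_connect_backend() ahead of register_framebuffer(), so a backend failure aborts the probe before the device is ever visible to userspace. A hedged, generic probe-ordering sketch of that idea; all functions are stubs, not the driver's real entry points:

	#include <stdio.h>

	static int connect_backend(void) { return 0; }	/* stub */
	static int register_fb(void)     { return 0; }	/* stub */
	static void disconnect_backend(void) { }

	static int probe(void)
	{
		int ret;

		ret = connect_backend();
		if (ret < 0)
			return ret;	/* nothing user-visible to undo yet */

		ret = register_fb();
		if (ret) {
			/* unwind only what was set up before the failure */
			disconnect_backend();
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		return probe();
	}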
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index 3fe9742c23ca..2f8643efe92c 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
38 | 38 | ||
39 | #include <asm/addrspace.h> | 39 | #include <asm/addrspace.h> |
40 | #include <asm/ar7/ar7.h> | 40 | #include <asm/mach-ar7/ar7.h> |
41 | 41 | ||
42 | #define DRVNAME "ar7_wdt" | 42 | #define DRVNAME "ar7_wdt" |
43 | #define LONGNAME "TI AR7 Watchdog Timer" | 43 | #define LONGNAME "TI AR7 Watchdog Timer" |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index ec2a39b1e26f..7c284342f30f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -1,6 +1,9 @@ | |||
1 | obj-y += grant-table.o features.o events.o manage.o | 1 | obj-y += grant-table.o features.o events.o manage.o |
2 | obj-y += xenbus/ | 2 | obj-y += xenbus/ |
3 | 3 | ||
4 | nostackp := $(call cc-option, -fno-stack-protector) | ||
5 | CFLAGS_features.o := $(nostackp) | ||
6 | |||
4 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 7 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
5 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 8 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
6 | obj-$(CONFIG_XEN_BALLOON) += balloon.o | 9 | obj-$(CONFIG_XEN_BALLOON) += balloon.o |