Diffstat (limited to 'drivers')
52 files changed, 646 insertions, 1011 deletions
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index a2bb4eccaab4..9aaf401a8975 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -384,9 +384,9 @@ static struct agp_device_ids via_agp_device_ids[] __devinitdata =
 		.device_id	= PCI_DEVICE_ID_VIA_P4M800CE,
 		.chipset_name	= "VT3314",
 	},
-	/* CX700 */
+	/* VT3324 / CX700 */
 	{
-		.device_id	= PCI_DEVICE_ID_VIA_CX700,
+		.device_id	= PCI_DEVICE_ID_VIA_VT3324,
 		.chipset_name	= "CX700",
 	},
 	/* VT3336 */
@@ -540,7 +540,7 @@ static const struct pci_device_id agp_via_pci_table[] = {
 	ID(PCI_DEVICE_ID_VIA_83_87XX_1),
 	ID(PCI_DEVICE_ID_VIA_3296_0),
 	ID(PCI_DEVICE_ID_VIA_P4M800CE),
-	ID(PCI_DEVICE_ID_VIA_CX700),
+	ID(PCI_DEVICE_ID_VIA_VT3324),
 	ID(PCI_DEVICE_ID_VIA_VT3336),
 	ID(PCI_DEVICE_ID_VIA_P4M890),
 	{ }
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 147c12047cf3..41f78e2c158f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -50,18 +50,10 @@
 #include <linux/poll.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/delay.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_X86
-/* This is ugly, but I've determined that x86 is the only architecture
-   that can reasonably support the IPMI NMI watchdog timeout at this
-   time.  If another architecture adds this capability somehow, it
-   will have to be a somewhat different mechanism and I have no idea
-   how it will work.  So in the unlikely event that another
-   architecture supports this, we can figure out a good generic
-   mechanism for it at that time. */
-#define HAVE_DIE_NMI_POST
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/apic.h>
 #endif
 
 #define PFX "IPMI Watchdog: "
@@ -327,11 +319,6 @@ static unsigned char ipmi_version_minor;
 /* If a pretimeout occurs, this is used to allow only one panic to happen. */
 static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
 
-#ifdef HAVE_DIE_NMI_POST
-static int testing_nmi;
-static int nmi_handler_registered;
-#endif
-
 static int ipmi_heartbeat(void);
 static void panic_halt_ipmi_heartbeat(void);
 
@@ -373,10 +360,6 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
 	int hbnow = 0;
 
 
-	/* These can be cleared as we are setting the timeout. */
-	ipmi_start_timer_on_heartbeat = 0;
-	pretimeout_since_last_heartbeat = 0;
-
 	data[0] = 0;
 	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
 
@@ -451,12 +434,13 @@ static int ipmi_set_timeout(int do_heartbeat)
 
 	wait_for_completion(&set_timeout_wait);
 
-	mutex_unlock(&set_timeout_lock);
-
 	if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
 	    || ((send_heartbeat_now)
 		&& (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+	{
 		rv = ipmi_heartbeat();
+	}
+	mutex_unlock(&set_timeout_lock);
 
  out:
 	return rv;
@@ -536,10 +520,12 @@ static int ipmi_heartbeat(void)
 	int rv;
 	struct ipmi_system_interface_addr addr;
 
-	if (ipmi_ignore_heartbeat)
+	if (ipmi_ignore_heartbeat) {
 		return 0;
+	}
 
 	if (ipmi_start_timer_on_heartbeat) {
+		ipmi_start_timer_on_heartbeat = 0;
 		ipmi_watchdog_state = action_val;
 		return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
 	} else if (pretimeout_since_last_heartbeat) {
@@ -547,6 +533,7 @@ static int ipmi_heartbeat(void)
 		   We don't want to set the action, though, we want to
 		   leave that alone (thus it can't be combined with the
 		   above operation. */
+		pretimeout_since_last_heartbeat = 0;
 		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
 	}
 
@@ -934,45 +921,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
 		printk(KERN_CRIT PFX "Unable to register misc device\n");
 	}
 
-#ifdef HAVE_DIE_NMI_POST
-	if (nmi_handler_registered) {
-		int old_pretimeout = pretimeout;
-		int old_timeout = timeout;
-		int old_preop_val = preop_val;
-
-		/* Set the pretimeout to go off in a second and give
-		   ourselves plenty of time to stop the timer. */
-		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
-		preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
-		pretimeout = 99;
-		timeout = 100;
-
-		testing_nmi = 1;
-
-		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
-		if (rv) {
-			printk(KERN_WARNING PFX "Error starting timer to"
-			       " test NMI: 0x%x.  The NMI pretimeout will"
-			       " likely not work\n", rv);
-			rv = 0;
-			goto out_restore;
-		}
-
-		msleep(1500);
-
-		if (testing_nmi != 2) {
-			printk(KERN_WARNING PFX "IPMI NMI didn't seem to"
-			       " occur.  The NMI pretimeout will"
-			       " likely not work\n");
-		}
- out_restore:
-		testing_nmi = 0;
-		preop_val = old_preop_val;
-		pretimeout = old_pretimeout;
-		timeout = old_timeout;
-	}
-#endif
-
  out:
 	up_write(&register_sem);
 
@@ -982,10 +930,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
 		ipmi_watchdog_state = action_val;
 		ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
 		printk(KERN_INFO PFX "Starting now!\n");
-	} else {
-		/* Stop the timer now. */
-		ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
-		ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
 	}
 }
 
@@ -1022,28 +966,17 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
 	up_write(&register_sem);
 }
 
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
 static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(void *dev_id, int cpu, int handled)
 {
-	if (val != DIE_NMI_POST)
-		return NOTIFY_OK;
-
-	if (testing_nmi) {
-		testing_nmi = 2;
-		return NOTIFY_STOP;
-	}
-
 	/* If we are not expecting a timeout, ignore it. */
 	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
-		return NOTIFY_OK;
-
-	if (preaction_val != WDOG_PRETIMEOUT_NMI)
-		return NOTIFY_OK;
+		return NOTIFY_DONE;
 
 	/* If no one else handled the NMI, we assume it was the IPMI
 	   watchdog. */
-	if (preop_val == WDOG_PREOP_PANIC) {
+	if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) {
 		/* On some machines, the heartbeat will give
 		   an error and not work unless we re-enable
 		   the timer.  So do so. */
@@ -1052,12 +985,18 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
 		panic(PFX "pre-timeout");
 	}
 
-	return NOTIFY_STOP;
+	return NOTIFY_DONE;
 }
 
-static struct notifier_block ipmi_nmi_handler = {
-	.notifier_call = ipmi_nmi
+static struct nmi_handler ipmi_nmi_handler =
+{
+	.link     = LIST_HEAD_INIT(ipmi_nmi_handler.link),
+	.dev_name = "ipmi_watchdog",
+	.dev_id   = NULL,
+	.handler  = ipmi_nmi,
+	.priority = 0, /* Call us last. */
 };
+int nmi_handler_registered;
 #endif
 
 static int wdog_reboot_handler(struct notifier_block *this,
@@ -1174,7 +1113,7 @@ static int preaction_op(const char *inval, char *outval)
 		preaction_val = WDOG_PRETIMEOUT_NONE;
 	else if (strcmp(inval, "pre_smi") == 0)
 		preaction_val = WDOG_PRETIMEOUT_SMI;
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
 	else if (strcmp(inval, "pre_nmi") == 0)
 		preaction_val = WDOG_PRETIMEOUT_NMI;
 #endif
@@ -1208,7 +1147,7 @@ static int preop_op(const char *inval, char *outval)
 
 static void check_parms(void)
 {
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
 	int do_nmi = 0;
 	int rv;
 
@@ -1221,9 +1160,20 @@ static void check_parms(void)
 			preop_op("preop_none", NULL);
 			do_nmi = 0;
 		}
+#ifdef CONFIG_X86_LOCAL_APIC
+		if (nmi_watchdog == NMI_IO_APIC) {
+			printk(KERN_WARNING PFX "nmi_watchdog is set to IO APIC"
+			       " mode (value is %d), that is incompatible"
+			       " with using NMI in the IPMI watchdog."
+			       " Disabling IPMI nmi pretimeout.\n",
+			       nmi_watchdog);
+			preaction_val = WDOG_PRETIMEOUT_NONE;
+			do_nmi = 0;
+		}
+#endif
 	}
 	if (do_nmi && !nmi_handler_registered) {
-		rv = register_die_notifier(&ipmi_nmi_handler);
+		rv = request_nmi(&ipmi_nmi_handler);
 		if (rv) {
 			printk(KERN_WARNING PFX
 			       "Can't register nmi handler\n");
@@ -1231,7 +1181,7 @@ static void check_parms(void)
 		} else
 			nmi_handler_registered = 1;
 	} else if (!do_nmi && nmi_handler_registered) {
-		unregister_die_notifier(&ipmi_nmi_handler);
+		release_nmi(&ipmi_nmi_handler);
 		nmi_handler_registered = 0;
 	}
 #endif
@@ -1267,9 +1217,9 @@ static int __init ipmi_wdog_init(void)
 
 	rv = ipmi_smi_watcher_register(&smi_watcher);
 	if (rv) {
-#ifdef HAVE_DIE_NMI_POST
-		if (nmi_handler_registered)
-			unregister_die_notifier(&ipmi_nmi_handler);
+#ifdef HAVE_NMI_HANDLER
+		if (preaction_val == WDOG_PRETIMEOUT_NMI)
+			release_nmi(&ipmi_nmi_handler);
 #endif
 		atomic_notifier_chain_unregister(&panic_notifier_list,
 						 &wdog_panic_notifier);
@@ -1288,9 +1238,9 @@ static void __exit ipmi_wdog_exit(void)
 	ipmi_smi_watcher_unregister(&smi_watcher);
 	ipmi_unregister_watchdog(watchdog_ifnum);
 
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
 	if (nmi_handler_registered)
-		unregister_die_notifier(&ipmi_nmi_handler);
+		release_nmi(&ipmi_nmi_handler);
 #endif
 
 	atomic_notifier_chain_unregister(&panic_notifier_list,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 9040809d2c25..b1a9b81c211f 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -4,13 +4,10 @@
 # Andre Hedrick <andre@linux-ide.org>
 #
 
-if BLOCK
-
-menu "ATA/ATAPI/MFM/RLL support"
-	depends on HAS_IOMEM
-
-config IDE
+menuconfig IDE
 	tristate "ATA/ATAPI/MFM/RLL support"
+	depends on BLOCK
+	depends on HAS_IOMEM
 	---help---
 	  If you say Y here, your kernel will be able to manage low cost mass
 	  storage units such as ATA/(E)IDE and ATAPI units. The most common
@@ -1099,8 +1096,4 @@ config BLK_DEV_HD_ONLY
 config BLK_DEV_HD
 	def_bool BLK_DEV_HD_IDE || BLK_DEV_HD_ONLY
 
-endif
-
-endmenu
-
-endif
+endif # IDE
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index c04cb25a01ff..ca0341c05e55 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -1002,18 +1002,6 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
 		return 1;	/* let the PIO routines handle this weirdness */
 }
 
-static int cris_config_drive_for_dma (ide_drive_t *drive)
-{
-	u8 speed = ide_max_dma_mode(drive);
-
-	if (!speed)
-		return 0;
-
-	speed_cris_ide(drive, speed);
-
-	return ide_dma_enable(drive);
-}
-
 /*
  * cris_dma_intr() is the handler for disk read/write DMA interrupts
  */
@@ -1043,7 +1031,7 @@ static ide_startstop_t cris_dma_intr (ide_drive_t *drive)
 
 static int cris_dma_check(ide_drive_t *drive)
 {
-	if (ide_use_dma(drive) && cris_config_drive_for_dma(drive))
+	if (ide_tune_dma(drive))
 		return 0;
 
 	return -1;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 5fe85191d49c..b77b7d138c49 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -670,41 +670,6 @@ int __ide_dma_good_drive (ide_drive_t *drive)
 
 EXPORT_SYMBOL(__ide_dma_good_drive);
 
-int ide_use_dma(ide_drive_t *drive)
-{
-	struct hd_driveid *id = drive->id;
-	ide_hwif_t *hwif = drive->hwif;
-
-	if ((id->capability & 1) == 0 || drive->autodma == 0)
-		return 0;
-
-	/* consult the list of known "bad" drives */
-	if (__ide_dma_bad_drive(drive))
-		return 0;
-
-	/* capable of UltraDMA modes */
-	if (id->field_valid & 4) {
-		if (hwif->ultra_mask & id->dma_ultra)
-			return 1;
-	}
-
-	/* capable of regular DMA modes */
-	if (id->field_valid & 2) {
-		if (hwif->mwdma_mask & id->dma_mword)
-			return 1;
-		if (hwif->swdma_mask & id->dma_1word)
-			return 1;
-	}
-
-	/* consult the list of known "good" drives */
-	if (__ide_dma_good_drive(drive) && id->eide_dma_time < 150)
-		return 1;
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ide_use_dma);
-
 static const u8 xfer_mode_bases[] = {
 	XFER_UDMA_0,
 	XFER_MW_DMA_0,
@@ -731,10 +696,12 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base)
 		mask &= 0x07;
 		break;
 	case XFER_MW_DMA_0:
-		mask = id->dma_mword & hwif->mwdma_mask;
+		if (id->field_valid & 2)
+			mask = id->dma_mword & hwif->mwdma_mask;
 		break;
 	case XFER_SW_DMA_0:
-		mask = id->dma_1word & hwif->swdma_mask;
+		if (id->field_valid & 2)
+			mask = id->dma_1word & hwif->swdma_mask;
 		break;
 	default:
 		BUG();
@@ -783,8 +750,11 @@ int ide_tune_dma(ide_drive_t *drive)
 {
 	u8 speed;
 
-	/* TODO: use only ide_max_dma_mode() */
-	if (!ide_use_dma(drive))
+	if ((drive->id->capability & 1) == 0 || drive->autodma == 0)
+		return 0;
+
+	/* consult the list of known "bad" drives */
+	if (__ide_dma_bad_drive(drive))
 		return 0;
 
 	speed = ide_max_dma_mode(drive);
@@ -792,9 +762,10 @@ int ide_tune_dma(ide_drive_t *drive)
 	if (!speed)
 		return 0;
 
-	drive->hwif->speedproc(drive, speed);
+	if (drive->hwif->speedproc(drive, speed))
+		return 0;
 
-	return ide_dma_enable(drive);
+	return 1;
 }
 
 EXPORT_SYMBOL_GPL(ide_tune_dma);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8e568143d90d..bfe8f1b712ba 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -223,6 +223,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 			break;
 		if (drive->hwif->ide_dma_check == NULL)
 			break;
+		drive->hwif->dma_off_quietly(drive);
 		ide_set_dma(drive);
 		break;
 	}
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 3be3c69383f2..074bb32a4a40 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -111,18 +111,6 @@ u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
 
 EXPORT_SYMBOL(ide_rate_filter);
 
-int ide_dma_enable (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	struct hd_driveid *id = drive->id;
-
-	return ((int) ((((id->dma_ultra >> 8) & hwif->ultra_mask) ||
-			((id->dma_mword >> 8) & hwif->mwdma_mask) ||
-			((id->dma_1word >> 8) & hwif->swdma_mask)) ? 1 : 0));
-}
-
-EXPORT_SYMBOL(ide_dma_enable);
-
 int ide_use_fast_pio(ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index f2b547ff7722..6002713a20a1 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -910,6 +910,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
 		err = 0;
 
 	if (arg) {
+		hwif->dma_off_quietly(drive);
 		if (ide_set_dma(drive) || hwif->ide_dma_on(drive))
 			err = -EIO;
 	} else
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 428efdae0c7b..27525ec2e19a 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -455,28 +455,6 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	return (ide_config_drive_speed(drive, speed));
 }
 
-
-/**
- * config_chipset_for_dma - set up DMA mode
- * @drive: drive to configure for
- *
- * Place a drive into DMA mode and tune the chipset for
- * the selected speed.
- *
- * Returns true if DMA mode can be used
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
-	u8 speed = ide_max_dma_mode(drive);
-
-	if (!(speed))
-		return 0;
-
-	(void) ali15x3_tune_chipset(drive, speed);
-	return ide_dma_enable(drive);
-}
-
 /**
  * ali15x3_config_drive_for_dma - configure for DMA
  * @drive: drive to configure
@@ -487,48 +465,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
 
 static int ali15x3_config_drive_for_dma(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = HWIF(drive);
-	struct hd_driveid *id = drive->id;
-
-	if ((m5229_revision<=0x20) && (drive->media!=ide_disk))
-		goto ata_pio;
-
 	drive->init_speed = 0;
 
-	if ((id != NULL) && ((id->capability & 1) != 0) && drive->autodma) {
-		/* Consult the list of known "bad" drives */
-		if (__ide_dma_bad_drive(drive))
-			goto ata_pio;
-		if ((id->field_valid & 4) && (m5229_revision >= 0xC2)) {
-			if (id->dma_ultra & hwif->ultra_mask) {
-				/* Force if Capable UltraDMA */
-				int dma = config_chipset_for_dma(drive);
-				if ((id->field_valid & 2) && !dma)
-					goto try_dma_modes;
-			}
-		} else if (id->field_valid & 2) {
-	try_dma_modes:
-			if ((id->dma_mword & hwif->mwdma_mask) ||
-			    (id->dma_1word & hwif->swdma_mask)) {
-				/* Force if Capable regular DMA modes */
-				if (!config_chipset_for_dma(drive))
-					goto ata_pio;
-			}
-		} else if (__ide_dma_good_drive(drive) &&
-			   (id->eide_dma_time < 150)) {
-			/* Consult the list of known "good" drives */
-			if (!config_chipset_for_dma(drive))
-				goto ata_pio;
-		} else {
-			goto ata_pio;
-		}
-	} else {
-	ata_pio:
-		hwif->tuneproc(drive, 255);
-		return -1;
-	}
+	if (ide_tune_dma(drive))
+		return 0;
 
-	return 0;
+	ali15x3_tune_drive(drive, 255);
+
+	return -1;
 }
 
 /**
@@ -739,7 +683,8 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
 		return;
 	}
 
-	hwif->atapi_dma = 1;
+	if (m5229_revision > 0x20)
+		hwif->atapi_dma = 1;
 
 	if (m5229_revision <= 0x20)
 		hwif->ultra_mask = 0x00; /* no udma */
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 61ea96b5555c..7c57dc696f52 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -352,22 +352,9 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
 	return ide_config_drive_speed(drive, speed);
 }
 
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
-	u8 speed = ide_max_dma_mode(drive);
-
-	if (!speed)
-		return 0;
-
-	if (cmd64x_tune_chipset(drive, speed))
-		return 0;
-
-	return ide_dma_enable(drive);
-}
-
 static int cmd64x_config_drive_for_dma (ide_drive_t *drive)
 {
-	if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+	if (ide_tune_dma(drive))
 		return 0;
 
 	if (ide_use_fast_pio(drive))
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index b2d7c132ef4b..1eec1f308d16 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -1,10 +1,10 @@
 /*
- * linux/drivers/ide/pci/cs5530.c	Version 0.7	Sept 10, 2002
+ * linux/drivers/ide/pci/cs5530.c	Version 0.73	Mar 10 2007
  *
  * Copyright (C) 2000 Andre Hedrick <andre@linux-ide.org>
- * Ditto of GNU General Public License.
- *
  * Copyright (C) 2000 Mark Lord <mlord@pobox.com>
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
  * May be copied or modified under the terms of the GNU General Public License
  *
  * Development of this chipset driver was funded
@@ -62,6 +62,14 @@ static unsigned int cs5530_pio_timings[2][5] = {
 #define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132)
 #define CS5530_BASEREG(hwif)	(((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20))
 
+static void cs5530_tunepio(ide_drive_t *drive, u8 pio)
+{
+	unsigned long basereg = CS5530_BASEREG(drive->hwif);
+	unsigned int format = (inl(basereg + 4) >> 31) & 1;
+
+	outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
+}
+
 /**
  * cs5530_tuneproc - select/set PIO modes
  *
@@ -74,98 +82,78 @@ static unsigned int cs5530_pio_timings[2][5] = {
 
 static void cs5530_tuneproc (ide_drive_t *drive, u8 pio)	/* pio=255 means "autotune" */
 {
-	ide_hwif_t *hwif = HWIF(drive);
-	unsigned int format;
-	unsigned long basereg = CS5530_BASEREG(hwif);
-	static u8 modes[5] = { XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3, XFER_PIO_4};
-
 	pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
-	if (!cs5530_set_xfer_mode(drive, modes[pio])) {
-		format = (inl(basereg + 4) >> 31) & 1;
-		outl(cs5530_pio_timings[format][pio],
-			basereg+(drive->select.b.unit<<3));
+
+	if (cs5530_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0)
+		cs5530_tunepio(drive, pio);
+}
+
+/**
+ * cs5530_udma_filter - UDMA filter
+ * @drive: drive
+ *
+ * cs5530_udma_filter() does UDMA mask filtering for the given drive
+ * taking into the consideration capabilities of the mate device.
+ *
+ * The CS5530 specifies that two drives sharing a cable cannot mix
+ * UDMA/MDMA.  It has to be one or the other, for the pair, though
+ * different timings can still be chosen for each drive.  We could
+ * set the appropriate timing bits on the fly, but that might be
+ * a bit confusing.  So, for now we statically handle this requirement
+ * by looking at our mate drive to see what it is capable of, before
+ * choosing a mode for our own drive.
+ *
+ * Note: This relies on the fact we never fail from UDMA to MWDMA2
+ * but instead drop to PIO.
+ */
+
+static u8 cs5530_udma_filter(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	ide_drive_t *mate = &hwif->drives[(drive->dn & 1) ^ 1];
+	struct hd_driveid *mateid = mate->id;
+	u8 mask = hwif->ultra_mask;
+
+	if (mate->present == 0)
+		goto out;
+
+	if ((mateid->capability & 1) && __ide_dma_bad_drive(mate) == 0) {
+		if ((mateid->field_valid & 4) && (mateid->dma_ultra & 7))
+			goto out;
+		if ((mateid->field_valid & 2) && (mateid->dma_mword & 7))
+			mask = 0;
 	}
+out:
+	return mask;
 }
 
 /**
- * cs5530_config_dma - select/set DMA and UDMA modes
+ * cs5530_config_dma - set DMA/UDMA mode
  * @drive: drive to tune
  *
- * cs5530_config_dma() handles selection/setting of DMA/UDMA modes
- * for both the chipset and drive. The CS5530 has limitations about
- * mixing DMA/UDMA on the same cable.
+ * cs5530_config_dma() handles setting of DMA/UDMA mode
+ * for both the chipset and drive.
  */
 
-static int cs5530_config_dma (ide_drive_t *drive)
+static int cs5530_config_dma(ide_drive_t *drive)
 {
-	int udma_ok = 1, mode = 0;
-	ide_hwif_t *hwif = HWIF(drive);
-	int unit = drive->select.b.unit;
-	ide_drive_t *mate = &hwif->drives[unit^1];
-	struct hd_driveid *id = drive->id;
-	unsigned int reg, timings = 0;
-	unsigned long basereg;
+	if (ide_tune_dma(drive))
+		return 0;
 
-	/*
-	 * Default to DMA-off in case we run into trouble here.
-	 */
-	hwif->dma_off_quietly(drive);
+	return 1;
+}
 
-	/*
-	 * The CS5530 specifies that two drives sharing a cable cannot
-	 * mix UDMA/MDMA.  It has to be one or the other, for the pair,
-	 * though different timings can still be chosen for each drive.
-	 * We could set the appropriate timing bits on the fly,
-	 * but that might be a bit confusing.  So, for now we statically
-	 * handle this requirement by looking at our mate drive to see
-	 * what it is capable of, before choosing a mode for our own drive.
-	 *
-	 * Note: This relies on the fact we never fail from UDMA to MWDMA_2
-	 * but instead drop to PIO
-	 */
-	if (mate->present) {
-		struct hd_driveid *mateid = mate->id;
-		if (mateid && (mateid->capability & 1) &&
-		    !__ide_dma_bad_drive(mate)) {
-			if ((mateid->field_valid & 4) &&
-			    (mateid->dma_ultra & 7))
-				udma_ok = 1;
-			else if ((mateid->field_valid & 2) &&
-				 (mateid->dma_mword & 7))
-				udma_ok = 0;
-			else
-				udma_ok = 1;
-		}
-	}
+static int cs5530_tune_chipset(ide_drive_t *drive, u8 mode)
+{
+	unsigned long basereg;
+	unsigned int reg, timings = 0;
 
-	/*
-	 * Now see what the current drive is capable of,
-	 * selecting UDMA only if the mate said it was ok.
-	 */
-	if (id && (id->capability & 1) && drive->autodma &&
-	    !__ide_dma_bad_drive(drive)) {
-		if (udma_ok && (id->field_valid & 4) && (id->dma_ultra & 7)) {
-			if (id->dma_ultra & 4)
-				mode = XFER_UDMA_2;
-			else if (id->dma_ultra & 2)
-				mode = XFER_UDMA_1;
-			else if (id->dma_ultra & 1)
-				mode = XFER_UDMA_0;
-		}
-		if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {
-			if (id->dma_mword & 4)
-				mode = XFER_MW_DMA_2;
-			else if (id->dma_mword & 2)
-				mode = XFER_MW_DMA_1;
-			else if (id->dma_mword & 1)
-				mode = XFER_MW_DMA_0;
-		}
-	}
+	mode = ide_rate_filter(drive, mode);
 
 	/*
 	 * Tell the drive to switch to the new mode; abort on failure.
 	 */
-	if (!mode || cs5530_set_xfer_mode(drive, mode))
+	if (cs5530_set_xfer_mode(drive, mode))
 		return 1;	/* failure */
 
 	/*
@@ -178,14 +166,21 @@ static int cs5530_config_dma (ide_drive_t *drive)
 	case XFER_MW_DMA_0:	timings = 0x00077771; break;
 	case XFER_MW_DMA_1:	timings = 0x00012121; break;
 	case XFER_MW_DMA_2:	timings = 0x00002020; break;
+	case XFER_PIO_4:
+	case XFER_PIO_3:
+	case XFER_PIO_2:
+	case XFER_PIO_1:
+	case XFER_PIO_0:
+		cs5530_tunepio(drive, mode - XFER_PIO_0);
+		return 0;
 	default:
 		BUG();
 		break;
 	}
-	basereg = CS5530_BASEREG(hwif);
+	basereg = CS5530_BASEREG(drive->hwif);
 	reg = inl(basereg + 4);	/* get drive0 config register */
 	timings |= reg & 0x80000000;	/* preserve PIO format bit */
-	if (unit == 0) {	/* are we configuring drive0? */
+	if ((drive->dn & 1) == 0) {	/* are we configuring drive0? */
 		outl(timings, basereg + 4);	/* write drive0 config register */
 	} else {
 		if (timings & 0x00100000)
@@ -311,6 +306,8 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
 		hwif->serialized = hwif->mate->serialized = 1;
 
 	hwif->tuneproc = &cs5530_tuneproc;
+	hwif->speedproc = &cs5530_tune_chipset;
+
 	basereg = CS5530_BASEREG(hwif);
 	d0_timings = inl(basereg + 0);
 	if (CS5530_BAD_PIO(d0_timings)) {
@@ -332,6 +329,7 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
 	hwif->ultra_mask = 0x07;
 	hwif->mwdma_mask = 0x07;
 
+	hwif->udma_filter = cs5530_udma_filter;
 	hwif->ide_dma_check = &cs5530_config_dma;
 	if (!noautodma)
 		hwif->autodma = 1;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 442f658c6ae7..5faaff87d580 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -464,25 +464,6 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed)
 }
 
 /**
- * config_chipset_for_dma - configure for DMA
- * @drive: drive to configure
- *
- * Called by the IDE layer when it wants the timings set up.
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
-	u8 speed = ide_max_dma_mode(drive);
-
-	if (speed == 0)
-		return 0;
-
-	it821x_tune_chipset(drive, speed);
-
-	return ide_dma_enable(drive);
-}
-
-/**
  * it821x_configure_drive_for_dma - set up for DMA transfers
  * @drive: drive we are going to set up
  *
@@ -494,7 +475,7 @@ static int config_chipset_for_dma (ide_drive_t *drive)
 
 static int it821x_config_drive_for_dma (ide_drive_t *drive)
 {
-	if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+	if (ide_tune_dma(drive))
 		return 0;
 
 	it821x_tuneproc(drive, 255);
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 65b1e124edf7..cc0bfdcf1f19 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -228,38 +228,11 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
 	return get_indexed_reg(hwif, 0x0b) & 0x04;
 }
 
-static int config_chipset_for_dma(ide_drive_t *drive)
-{
-	struct hd_driveid *id = drive->id;
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 speed;
-
-	if (id->capability & 4) {
-		/*
-		 * Set IORDY_EN & PREFETCH_EN (this seems to have
-		 * NO real effect since this register is reloaded
-		 * by hardware when the transfer mode is selected)
-		 */
-		u8 tmp, adj = (drive->dn & 1) ? 0x08 : 0x00;
-
-		tmp = get_indexed_reg(hwif, 0x13 + adj);
-		set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03);
-	}
-
-	speed = ide_max_dma_mode(drive);
-
-	if (!speed)
-		return 0;
-
-	(void) hwif->speedproc(drive, speed);
-	return ide_dma_enable(drive);
-}
-
 static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
 {
 	drive->init_speed = 0;
 
-	if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+	if (ide_tune_dma(drive))
 		return 0;
 
 	if (ide_use_fast_pio(drive))
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 7146fe3f6ba7..23844687deea 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -1,8 +1,9 @@
 /*
- * linux/drivers/ide/pci/pdc202xx_old.c	Version 0.36	Sept 11, 2002
+ * linux/drivers/ide/pci/pdc202xx_old.c	Version 0.50	Mar 3, 2007
  *
  * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2006-2007 MontaVista Software, Inc.
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
 *
 * Promise Ultra33 cards with BIOS v1.20 through 1.28 will need this
 * compiled into the kernel if you have more than one card installed.
@@ -60,45 +61,7 @@ static const char *pdc_quirk_drives[] = {
 	NULL
 };
 
-/* A Register */
-#define SYNC_ERRDY_EN	0xC0
-
-#define SYNC_IN		0x80	/* control bit, different for master vs. slave drives */
-#define ERRDY_EN	0x40	/* control bit, different for master vs. slave drives */
-#define IORDY_EN	0x20	/* PIO: IOREADY */
-#define PREFETCH_EN	0x10	/* PIO: PREFETCH */
-
-#define PA3		0x08	/* PIO"A" timing */
-#define PA2		0x04	/* PIO"A" timing */
-#define PA1		0x02	/* PIO"A" timing */
-#define PA0		0x01	/* PIO"A" timing */
-
-/* B Register */
-
-#define MB2		0x80	/* DMA"B" timing */
-#define MB1		0x40	/* DMA"B" timing */
-#define MB0		0x20	/* DMA"B" timing */
-
-#define PB4		0x10	/* PIO_FORCE 1:0 */
-
-#define PB3		0x08	/* PIO"B" timing */	/* PIO flow Control mode */
-#define PB2		0x04	/* PIO"B" timing */	/* PIO 4 */
-#define PB1		0x02	/* PIO"B" timing */	/* PIO 3 half */
-#define PB0		0x01	/* PIO"B" timing */	/* PIO 3 other half */
-
-/* C Register */
-#define IORDYp_NO_SPEED	0x4F
-#define SPEED_DIS	0x0F
-
-#define DMARQp		0x80
-#define IORDYp		0x40
-#define DMAR_EN		0x20
-#define DMAW_EN		0x10
-
-#define MC3		0x08	/* DMA"C" timing */
-#define MC2		0x04	/* DMA"C" timing */
-#define MC1		0x02	/* DMA"C" timing */
-#define MC0		0x01	/* DMA"C" timing */
+static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
 
 static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 {
@@ -107,52 +70,25 @@ static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	u8 drive_pci = 0x60 + (drive->dn << 2);
 	u8 speed = ide_rate_filter(drive, xferspeed);
 
-	u32 drive_conf;
-	u8 AP, BP, CP, DP;
+	u8 AP = 0, BP = 0, CP = 0;
 	u8 TA = 0, TB = 0, TC = 0;
 
-	if (drive->media != ide_disk &&
-	    drive->media != ide_cdrom && speed < XFER_SW_DMA_0)
-		return -1;
-
+#if PDC202XX_DEBUG_DRIVE_INFO
+	u32 drive_conf = 0;
 	pci_read_config_dword(dev, drive_pci, &drive_conf);
-	pci_read_config_byte(dev, (drive_pci), &AP);
-	pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-	pci_read_config_byte(dev, (drive_pci)|0x02, &CP);
-	pci_read_config_byte(dev, (drive_pci)|0x03, &DP);
+#endif
 
-	if (speed < XFER_SW_DMA_0) {
-		if ((AP & 0x0F) || (BP & 0x07)) {
-			/* clear PIO modes of lower 8421 bits of A Register */
-			pci_write_config_byte(dev, (drive_pci), AP &~0x0F);
-			pci_read_config_byte(dev, (drive_pci), &AP);
-
-			/* clear PIO modes of lower 421 bits of B Register */
-			pci_write_config_byte(dev, (drive_pci)|0x01, BP &~0x07);
-			pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-
-			pci_read_config_byte(dev, (drive_pci), &AP);
-			pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-		}
-	} else {
-		if ((BP & 0xF0) && (CP & 0x0F)) {
-			/* clear DMA modes of upper 842 bits of B Register */
-			/* clear PIO forced mode upper 1 bit of B Register */
-			pci_write_config_byte(dev, (drive_pci)|0x01, BP &~0xF0);
-			pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-
-			/* clear DMA modes of lower 8421 bits of C Register */
-			pci_write_config_byte(dev, (drive_pci)|0x02, CP &~0x0F);
-			pci_read_config_byte(dev, (drive_pci)|0x02, &CP);
-		}
-	}
+	/*
+	 * TODO: do this once per channel
	 */
+	if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
+		pdc_old_disable_66MHz_clock(hwif);
 
-	pci_read_config_byte(dev, (drive_pci), &AP);
-	pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-	pci_read_config_byte(dev, (drive_pci)|0x02, &CP);
+	pci_read_config_byte(dev, drive_pci, &AP);
+	pci_read_config_byte(dev, drive_pci + 1, &BP);
+	pci_read_config_byte(dev, drive_pci + 2, &CP);
 
 	switch(speed) {
-	case XFER_UDMA_6: speed = XFER_UDMA_5;
 	case XFER_UDMA_5:
 	case XFER_UDMA_4:	TB = 0x20; TC = 0x01; break;
 	case XFER_UDMA_2:	TB = 0x20; TC = 0x01; break;
@@ -161,7 +97,7 @@ static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	case XFER_UDMA_0:
 	case XFER_MW_DMA_2:	TB = 0x60; TC = 0x03; break;
 	case XFER_MW_DMA_1:	TB = 0x60; TC = 0x04; break;
-	case XFER_MW_DMA_0:
+	case XFER_MW_DMA_0:	TB = 0xE0; TC = 0x0F; break;
 	case XFER_SW_DMA_2:	TB = 0x60; TC = 0x05; break;
 	case XFER_SW_DMA_1:	TB = 0x80; TC = 0x06; break;
 	case XFER_SW_DMA_0:	TB = 0xC0; TC = 0x0B; break;
@@ -174,25 +110,39 @@ static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	}
 
 	if (speed < XFER_SW_DMA_0) {
-		pci_write_config_byte(dev, (drive_pci), AP|TA);
-		pci_write_config_byte(dev, (drive_pci)|0x01, BP|TB);
+		/*
+		 * preserve SYNC_INT / ERDDY_EN bits while clearing
+		 * Prefetch_EN / IORDY_EN / PA[3:0] bits of register A
+		 */
+		AP &= ~0x3f;
+		if (drive->id->capability & 4)
+			AP |= 0x20;	/* set IORDY_EN bit */
+		if (drive->media == ide_disk)
+			AP |= 0x10;	/* set Prefetch_EN bit */
+		/* clear PB[4:0] bits of register B */
+		BP &= ~0x1f;
+		pci_write_config_byte(dev, drive_pci, AP | TA);
+		pci_write_config_byte(dev, drive_pci + 1, BP | TB);
 	} else {
-		pci_write_config_byte(dev, (drive_pci)|0x01, BP|TB);
-		pci_write_config_byte(dev, (drive_pci)|0x02, CP|TC);
+		/* clear MB[2:0] bits of register B */
+		BP &= ~0xe0;
+		/* clear MC[3:0] bits of register C */
+		CP &= ~0x0f;
+		pci_write_config_byte(dev, drive_pci + 1, BP | TB);
+		pci_write_config_byte(dev, drive_pci + 2, CP | TC);
 	}
 
 #if PDC202XX_DEBUG_DRIVE_INFO
 	printk(KERN_DEBUG "%s: %s drive%d 0x%08x ",
 	       drive->name, ide_xfer_verbose(speed),
 	       drive->dn, drive_conf);
 	pci_read_config_dword(dev, drive_pci, &drive_conf);
 	printk("0x%08x\n", drive_conf);
-#endif /* PDC202XX_DEBUG_DRIVE_INFO */
+#endif
 
-	return (ide_config_drive_speed(drive, speed));
+	return ide_config_drive_speed(drive, speed);
 }
 
-
 static void pdc202xx_tune_drive(ide_drive_t *drive, u8 pio)
 {
 	pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
@@ -210,6 +160,8 @@ static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
 * Set the control register to use the 66MHz system
 * clock for UDMA 3/4/5 mode operation when necessary.
 *
+ * FIXME: this register is shared by both channels, some locking is needed
+ *
 * It may also be possible to leave the 66MHz clock on
 * and readjust the timing parameters.
 */
@@ -229,65 +181,11 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
 	outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
 }
 
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
-	struct hd_driveid *id = drive->id;
-	ide_hwif_t *hwif = HWIF(drive);
-	struct pci_dev *dev = hwif->pci_dev;
-	u32 drive_conf = 0;
-	u8 drive_pci = 0x60 + (drive->dn << 2);
-	u8 test1 = 0, test2 = 0, speed = -1;
-	u8 AP = 0;
-
-	if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
-		pdc_old_disable_66MHz_clock(drive->hwif);
-
-	drive_pci = 0x60 + (drive->dn << 2);
-	pci_read_config_dword(dev, drive_pci, &drive_conf);
-	if ((drive_conf != 0x004ff304) && (drive_conf != 0x004ff3c4))
-		goto chipset_is_set;
-
-	pci_read_config_byte(dev, drive_pci, &test1);
-	if (!(test1 & SYNC_ERRDY_EN)) {
-		if (drive->select.b.unit & 0x01) {
-			pci_read_config_byte(dev, drive_pci - 4, &test2);
-			if ((test2 & SYNC_ERRDY_EN) &&
-			    !(test1 & SYNC_ERRDY_EN)) {
-				pci_write_config_byte(dev, drive_pci,
-						      test1|SYNC_ERRDY_EN);
-			}
-		} else {
-			pci_write_config_byte(dev, drive_pci,
-					      test1|SYNC_ERRDY_EN);
-		}
-	}
-
-chipset_is_set:
-
-	pci_read_config_byte(dev, (drive_pci), &AP);
-	if (id->capability & 4)	/* IORDY_EN */
-		pci_write_config_byte(dev, (drive_pci), AP|IORDY_EN);
-	pci_read_config_byte(dev, (drive_pci), &AP);
-	if (drive->media == ide_disk)	/* PREFETCH_EN */
-		pci_write_config_byte(dev, (drive_pci), AP|PREFETCH_EN);
-
-	speed = ide_max_dma_mode(drive);
-
-	if (!(speed)) {
-		/* restore original pci-config space */
-		pci_write_config_dword(dev, drive_pci, drive_conf);
-		return 0;
-	}
-
-	(void) hwif->speedproc(drive, speed);
-	return ide_dma_enable(drive);
-}
-
 static int pdc202xx_config_drive_xfer_rate (ide_drive_t *drive)
 {
 	drive->init_speed = 0;
 
-	if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+	if (ide_tune_dma(drive))
 		return 0;
 
 	if (ide_use_fast_pio(drive))
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index b5ae0c50e216..523363c93794 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -1,7 +1,9 @@
 /*
- * linux/drivers/ide/pci/sc1200.c	Version 0.91	28-Jan-2003
+ * linux/drivers/ide/pci/sc1200.c	Version 0.94	Mar 10 2007
 *
 * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * Development of this chipset driver was funded
@@ -93,64 +95,50 @@ static const unsigned int sc1200_pio_timings[4][5] =
 */
 //#define SC1200_BAD_PIO(timings) (((timings)&~0x80000000)==0x00009172)
 
-static int sc1200_autoselect_dma_mode (ide_drive_t *drive)
+static void sc1200_tunepio(ide_drive_t *drive, u8 pio)
 {
-	int udma_ok = 1, mode = 0;
-	ide_hwif_t *hwif = HWIF(drive);
-	int unit = drive->select.b.unit;
-	ide_drive_t *mate = &hwif->drives[unit^1];
-	struct hd_driveid *id = drive->id;
-
-	/*
-	 * The SC1200 specifies that two drives sharing a cable cannot
-	 * mix UDMA/MDMA.  It has to be one or the other, for the pair,
-	 * though different timings can still be chosen for each drive.
-	 * We could set the appropriate timing bits on the fly,
-	 * but that might be a bit confusing.  So, for now we statically
-	 * handle this requirement by looking at our mate drive to see
-	 * what it is capable of, before choosing a mode for our own drive.
-	 */
-	if (mate->present) {
-		struct hd_driveid *mateid = mate->id;
-		if (mateid && (mateid->capability & 1) && !__ide_dma_bad_drive(mate)) {
-			if ((mateid->field_valid & 4) && (mateid->dma_ultra & 7))
-				udma_ok = 1;
-			else if ((mateid->field_valid & 2) && (mateid->dma_mword & 7))
-				udma_ok = 0;
-			else
-				udma_ok = 1;
-		}
-	}
-	/*
-	 * Now see what the current drive is capable of,
-	 * selecting UDMA only if the mate said it was ok.
-	 */
-	if (id && (id->capability & 1) && hwif->autodma && !__ide_dma_bad_drive(drive)) {
-		if (udma_ok && (id->field_valid & 4) && (id->dma_ultra & 7)) {
-			if (id->dma_ultra & 4)
-				mode = XFER_UDMA_2;
-			else if (id->dma_ultra & 2)
-				mode = XFER_UDMA_1;
-			else if (id->dma_ultra & 1)
-				mode = XFER_UDMA_0;
-		}
-		if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {
-			if (id->dma_mword & 4)
-				mode = XFER_MW_DMA_2;
-			else if (id->dma_mword & 2)
-				mode = XFER_MW_DMA_1;
-			else if (id->dma_mword & 1)
-				mode = XFER_MW_DMA_0;
-		}
-	}
-	return mode;
+	ide_hwif_t *hwif = drive->hwif;
+	struct pci_dev *pdev = hwif->pci_dev;
+	unsigned int basereg = hwif->channel ? 0x50 : 0x40, format = 0;
+
+	pci_read_config_dword(pdev, basereg + 4, &format);
+	format = (format >> 31) & 1;
+	if (format)
+		format += sc1200_get_pci_clock();
+	pci_write_config_dword(pdev, basereg + ((drive->dn & 1) << 3),
+			       sc1200_pio_timings[format][pio]);
 }
 
 /*
- * sc1200_config_dma2() handles selection/setting of DMA/UDMA modes
- * for both the chipset and drive.
+ * The SC1200 specifies that two drives sharing a cable cannot mix
+ * UDMA/MDMA.  It has to be one or the other, for the pair, though
+ * different timings can still be chosen for each drive.  We could
+ * set the appropriate timing bits on the fly, but that might be
+ * a bit confusing.  So, for now we statically handle this requirement
+ * by looking at our mate drive to see what it is capable of, before
+ * choosing a mode for our own drive.
 */
-static int sc1200_config_dma2 (ide_drive_t *drive, int mode)
+static u8 sc1200_udma_filter(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	ide_drive_t *mate = &hwif->drives[(drive->dn & 1) ^ 1];
+	struct hd_driveid *mateid = mate->id;
+	u8 mask = hwif->ultra_mask;
+
+	if (mate->present == 0)
+		goto out;
+
+	if ((mateid->capability & 1) && __ide_dma_bad_drive(mate) == 0) {
+		if ((mateid->field_valid & 4) && (mateid->dma_ultra & 7))
+			goto out;
+		if ((mateid->field_valid & 2) && (mateid->dma_mword & 7))
+			mask = 0;
+	}
+out:
+	return mask;
+}
+
+static int sc1200_tune_chipset(ide_drive_t *drive, u8 mode)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	int unit = drive->select.b.unit;
@@ -158,20 +146,26 @@ static int sc1200_config_dma2 (ide_drive_t *drive, int mode)
 	unsigned short pci_clock;
 	unsigned int basereg = hwif->channel ? 0x50 : 0x40;
 
-	/*
-	 * Default to DMA-off in case we run into trouble here.
-	 */
-	hwif->dma_off_quietly(drive);	/* turn off DMA while we fiddle */
-	outb(inb(hwif->dma_base+2)&~(unit?0x40:0x20), hwif->dma_base+2);	/* clear DMA_capable bit */
+	mode = ide_rate_filter(drive, mode);
 
 	/*
	 * Tell the drive to switch to the new mode; abort on failure.
	 */
-	if (!mode || sc1200_set_xfer_mode(drive, mode)) {
+	if (sc1200_set_xfer_mode(drive, mode)) {
171 | printk("SC1200: set xfer mode failure\n"); | 155 | printk("SC1200: set xfer mode failure\n"); |
172 | return 1; /* failure */ | 156 | return 1; /* failure */ |
173 | } | 157 | } |
174 | 158 | ||
159 | switch (mode) { | ||
160 | case XFER_PIO_4: | ||
161 | case XFER_PIO_3: | ||
162 | case XFER_PIO_2: | ||
163 | case XFER_PIO_1: | ||
164 | case XFER_PIO_0: | ||
165 | sc1200_tunepio(drive, mode - XFER_PIO_0); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
175 | pci_clock = sc1200_get_pci_clock(); | 169 | pci_clock = sc1200_get_pci_clock(); |
176 | 170 | ||
177 | /* | 171 | /* |
@@ -224,11 +218,9 @@ static int sc1200_config_dma2 (ide_drive_t *drive, int mode) | |||
224 | case PCI_CLK_66: timings = 0x00015151; break; | 218 | case PCI_CLK_66: timings = 0x00015151; break; |
225 | } | 219 | } |
226 | break; | 220 | break; |
227 | } | 221 | default: |
228 | 222 | BUG(); | |
229 | if (timings == 0) { | 223 | break; |
230 | printk("%s: sc1200_config_dma: huh? mode=%02x clk=%x \n", drive->name, mode, pci_clock); | ||
231 | return 1; /* failure */ | ||
232 | } | 224 | } |
233 | 225 | ||
234 | if (unit == 0) { /* are we configuring drive0? */ | 226 | if (unit == 0) { /* are we configuring drive0? */ |
@@ -239,8 +231,6 @@ static int sc1200_config_dma2 (ide_drive_t *drive, int mode) | |||
239 | pci_write_config_dword(hwif->pci_dev, basereg+12, timings); | 231 | pci_write_config_dword(hwif->pci_dev, basereg+12, timings); |
240 | } | 232 | } |
241 | 233 | ||
242 | outb(inb(hwif->dma_base+2)|(unit?0x40:0x20), hwif->dma_base+2); /* set DMA_capable bit */ | ||
243 | |||
244 | return 0; /* success */ | 234 | return 0; /* success */ |
245 | } | 235 | } |
246 | 236 | ||
@@ -250,7 +240,10 @@ static int sc1200_config_dma2 (ide_drive_t *drive, int mode) | |||
250 | */ | 240 | */ |
251 | static int sc1200_config_dma (ide_drive_t *drive) | 241 | static int sc1200_config_dma (ide_drive_t *drive) |
252 | { | 242 | { |
253 | return sc1200_config_dma2(drive, sc1200_autoselect_dma_mode(drive)); | 243 | if (ide_tune_dma(drive)) |
244 | return 0; | ||
245 | |||
246 | return 1; | ||
254 | } | 247 | } |
255 | 248 | ||
256 | 249 | ||
@@ -290,10 +283,11 @@ static int sc1200_ide_dma_end (ide_drive_t *drive) | |||
290 | static void sc1200_tuneproc (ide_drive_t *drive, byte pio) /* mode=255 means "autotune" */ | 283 | static void sc1200_tuneproc (ide_drive_t *drive, byte pio) /* mode=255 means "autotune" */ |
291 | { | 284 | { |
292 | ide_hwif_t *hwif = HWIF(drive); | 285 | ide_hwif_t *hwif = HWIF(drive); |
293 | unsigned int format; | ||
294 | static byte modes[5] = {XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3, XFER_PIO_4}; | ||
295 | int mode = -1; | 286 | int mode = -1; |
296 | 287 | ||
288 | /* | ||
289 | * bad abuse of ->tuneproc interface | ||
290 | */ | ||
297 | switch (pio) { | 291 | switch (pio) { |
298 | case 200: mode = XFER_UDMA_0; break; | 292 | case 200: mode = XFER_UDMA_0; break; |
299 | case 201: mode = XFER_UDMA_1; break; | 293 | case 201: mode = XFER_UDMA_1; break; |
@@ -304,20 +298,17 @@ static void sc1200_tuneproc (ide_drive_t *drive, byte pio) /* mode=255 means "au | |||
304 | } | 298 | } |
305 | if (mode != -1) { | 299 | if (mode != -1) { |
306 | printk("SC1200: %s: changing (U)DMA mode\n", drive->name); | 300 | printk("SC1200: %s: changing (U)DMA mode\n", drive->name); |
307 | (void)sc1200_config_dma2(drive, mode); | 301 | hwif->dma_off_quietly(drive); |
302 | if (sc1200_tune_chipset(drive, mode) == 0) | ||
303 | hwif->dma_host_on(drive); | ||
308 | return; | 304 | return; |
309 | } | 305 | } |
310 | 306 | ||
311 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); | 307 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); |
312 | printk("SC1200: %s: setting PIO mode%d\n", drive->name, pio); | 308 | printk("SC1200: %s: setting PIO mode%d\n", drive->name, pio); |
313 | if (!sc1200_set_xfer_mode(drive, modes[pio])) { | 309 | |
314 | unsigned int basereg = hwif->channel ? 0x50 : 0x40; | 310 | if (sc1200_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0) |
315 | pci_read_config_dword (hwif->pci_dev, basereg+4, &format); | 311 | sc1200_tunepio(drive, pio); |
316 | format = (format >> 31) & 1; | ||
317 | if (format) | ||
318 | format += sc1200_get_pci_clock(); | ||
319 | pci_write_config_dword(hwif->pci_dev, basereg + (drive->select.b.unit << 3), sc1200_pio_timings[format][pio]); | ||
320 | } | ||
321 | } | 312 | } |
322 | 313 | ||
323 | #ifdef CONFIG_PM | 314 | #ifdef CONFIG_PM |
@@ -438,12 +429,12 @@ static int sc1200_resume (struct pci_dev *dev) | |||
438 | for (d = 0; d < MAX_DRIVES; ++d) { | 429 | for (d = 0; d < MAX_DRIVES; ++d) { |
439 | ide_drive_t *drive = &(hwif->drives[d]); | 430 | ide_drive_t *drive = &(hwif->drives[d]); |
440 | if (drive->present && !__ide_dma_bad_drive(drive)) { | 431 | if (drive->present && !__ide_dma_bad_drive(drive)) { |
441 | int was_using_dma = drive->using_dma; | 432 | int enable_dma = drive->using_dma; |
442 | hwif->dma_off_quietly(drive); | 433 | hwif->dma_off_quietly(drive); |
443 | sc1200_config_dma(drive); | 434 | if (sc1200_config_dma(drive)) |
444 | if (!was_using_dma && drive->using_dma) { | 435 | enable_dma = 0; |
445 | hwif->dma_off_quietly(drive); | 436 | if (enable_dma) |
446 | } | 437 | hwif->dma_host_on(drive); |
447 | } | 438 | } |
448 | } | 439 | } |
449 | } | 440 | } |
@@ -461,11 +452,13 @@ static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif) | |||
461 | hwif->serialized = hwif->mate->serialized = 1; | 452 | hwif->serialized = hwif->mate->serialized = 1; |
462 | hwif->autodma = 0; | 453 | hwif->autodma = 0; |
463 | if (hwif->dma_base) { | 454 | if (hwif->dma_base) { |
455 | hwif->udma_filter = sc1200_udma_filter; | ||
464 | hwif->ide_dma_check = &sc1200_config_dma; | 456 | hwif->ide_dma_check = &sc1200_config_dma; |
465 | hwif->ide_dma_end = &sc1200_ide_dma_end; | 457 | hwif->ide_dma_end = &sc1200_ide_dma_end; |
466 | if (!noautodma) | 458 | if (!noautodma) |
467 | hwif->autodma = 1; | 459 | hwif->autodma = 1; |
468 | hwif->tuneproc = &sc1200_tuneproc; | 460 | hwif->tuneproc = &sc1200_tuneproc; |
461 | hwif->speedproc = &sc1200_tune_chipset; | ||
469 | } | 462 | } |
470 | hwif->atapi_dma = 1; | 463 | hwif->atapi_dma = 1; |
471 | hwif->ultra_mask = 0x07; | 464 | hwif->ultra_mask = 0x07; |
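The new sc1200_udma_filter() above moves the no-UDMA/MWDMA-mix rule for a shared cable into a ->udma_filter hook that returns the permitted UDMA mask. A standalone sketch of just that decision follows; the struct fields mimic the IDENTIFY bits the driver tests (capability bit 0, field_valid bits 1 and 2, dma_ultra/dma_mword bits 0-2) but are stand-ins, not the kernel's struct hd_driveid:

#include <stdio.h>

struct fake_id {                 /* subset of ATA IDENTIFY fields used here */
	unsigned short capability;   /* bit 0: DMA supported                  */
	unsigned short field_valid;  /* bit 2: UDMA words valid, bit 1: MWDMA */
	unsigned short dma_ultra;    /* bits 0-2: UDMA0-2 supported           */
	unsigned short dma_mword;    /* bits 0-2: MWDMA0-2 supported          */
};

struct fake_drive {
	int present;
	int bad_dma;                 /* stand-in for __ide_dma_bad_drive()    */
	struct fake_id id;
};

/*
 * Return the UDMA mask the controller may use for a drive given its cable
 * mate: if the mate is limited to multi-word DMA, UDMA must be disabled
 * for the pair, so the mask collapses to 0.
 */
static unsigned char sc1200_udma_filter_sketch(struct fake_drive *mate,
					       unsigned char ultra_mask)
{
	if (!mate->present)
		return ultra_mask;

	if ((mate->id.capability & 1) && !mate->bad_dma) {
		if ((mate->id.field_valid & 4) && (mate->id.dma_ultra & 7))
			return ultra_mask;      /* mate can do UDMA too */
		if ((mate->id.field_valid & 2) && (mate->id.dma_mword & 7))
			return 0;               /* mate is MWDMA-only   */
	}
	return ultra_mask;
}

int main(void)
{
	/* Mate reports valid MWDMA words only: the whole cable loses UDMA. */
	struct fake_drive mate = { 1, 0, { 1, 2, 0, 7 } };

	printf("udma mask: 0x%02x\n", sc1200_udma_filter_sketch(&mate, 0x07));
	return 0;
}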
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index cbf936325355..55bc0a32e34f 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c | |||
@@ -322,26 +322,6 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | /** | 324 | /** |
325 | * scc_config_chipset_for_dma - configure for DMA | ||
326 | * @drive: drive to configure | ||
327 | * | ||
328 | * Called by scc_config_drive_for_dma(). | ||
329 | */ | ||
330 | |||
331 | static int scc_config_chipset_for_dma(ide_drive_t *drive) | ||
332 | { | ||
333 | u8 speed = ide_max_dma_mode(drive); | ||
334 | |||
335 | if (!speed) | ||
336 | return 0; | ||
337 | |||
338 | if (scc_tune_chipset(drive, speed)) | ||
339 | return 0; | ||
340 | |||
341 | return ide_dma_enable(drive); | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * scc_configure_drive_for_dma - set up for DMA transfers | 325 | * scc_configure_drive_for_dma - set up for DMA transfers |
346 | * @drive: drive we are going to set up | 326 | * @drive: drive we are going to set up |
347 | * | 327 | * |
@@ -354,7 +334,7 @@ static int scc_config_chipset_for_dma(ide_drive_t *drive) | |||
354 | 334 | ||
355 | static int scc_config_drive_for_dma(ide_drive_t *drive) | 335 | static int scc_config_drive_for_dma(ide_drive_t *drive) |
356 | { | 336 | { |
357 | if (ide_use_dma(drive) && scc_config_chipset_for_dma(drive)) | 337 | if (ide_tune_dma(drive)) |
358 | return 0; | 338 | return 0; |
359 | 339 | ||
360 | if (ide_use_fast_pio(drive)) | 340 | if (ide_use_fast_pio(drive)) |
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c index 2fa6d92d16cc..6234f806c6b5 100644 --- a/drivers/ide/pci/serverworks.c +++ b/drivers/ide/pci/serverworks.c | |||
@@ -1,9 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/serverworks.c Version 0.8 25 Ebr 2003 | 2 | * linux/drivers/ide/pci/serverworks.c Version 0.9 Mar 4 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2000 Michel Aubry | 4 | * Copyright (C) 1998-2000 Michel Aubry |
5 | * Copyright (C) 1998-2000 Andrzej Krzysztofowicz | 5 | * Copyright (C) 1998-2000 Andrzej Krzysztofowicz |
6 | * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> | 6 | * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> |
7 | * Copyright (C) 2007 Bartlomiej Zolnierkiewicz | ||
7 | * Portions copyright (c) 2001 Sun Microsystems | 8 | * Portions copyright (c) 2001 Sun Microsystems |
8 | * | 9 | * |
9 | * | 10 | * |
@@ -136,19 +137,14 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
136 | 137 | ||
137 | ide_hwif_t *hwif = HWIF(drive); | 138 | ide_hwif_t *hwif = HWIF(drive); |
138 | struct pci_dev *dev = hwif->pci_dev; | 139 | struct pci_dev *dev = hwif->pci_dev; |
139 | u8 speed; | 140 | u8 speed = ide_rate_filter(drive, xferspeed); |
140 | u8 pio = ide_get_best_pio_mode(drive, 255, 5, NULL); | 141 | u8 pio = ide_get_best_pio_mode(drive, 255, 4, NULL); |
141 | u8 unit = (drive->select.b.unit & 0x01); | 142 | u8 unit = (drive->select.b.unit & 0x01); |
142 | u8 csb5 = svwks_csb_check(dev); | 143 | u8 csb5 = svwks_csb_check(dev); |
143 | u8 ultra_enable = 0, ultra_timing = 0; | 144 | u8 ultra_enable = 0, ultra_timing = 0; |
144 | u8 dma_timing = 0, pio_timing = 0; | 145 | u8 dma_timing = 0, pio_timing = 0; |
145 | u16 csb5_pio = 0; | 146 | u16 csb5_pio = 0; |
146 | 147 | ||
147 | if (xferspeed == 255) /* PIO auto-tuning */ | ||
148 | speed = XFER_PIO_0 + pio; | ||
149 | else | ||
150 | speed = ide_rate_filter(drive, xferspeed); | ||
151 | |||
152 | /* If we are about to put a disk into UDMA mode we screwed up. | 148 | /* If we are about to put a disk into UDMA mode we screwed up. |
153 | Our code assumes we never _ever_ do this on an OSB4 */ | 149 | Our code assumes we never _ever_ do this on an OSB4 */ |
154 | 150 | ||
@@ -231,6 +227,9 @@ oem_setup_failed: | |||
231 | case XFER_MW_DMA_2: | 227 | case XFER_MW_DMA_2: |
232 | case XFER_MW_DMA_1: | 228 | case XFER_MW_DMA_1: |
233 | case XFER_MW_DMA_0: | 229 | case XFER_MW_DMA_0: |
230 | /* | ||
231 | * TODO: always setup PIO mode so this won't be needed | ||
232 | */ | ||
234 | pio_timing |= pio_modes[pio]; | 233 | pio_timing |= pio_modes[pio]; |
235 | csb5_pio |= (pio << (4*drive->dn)); | 234 | csb5_pio |= (pio << (4*drive->dn)); |
236 | dma_timing |= dma_modes[speed - XFER_MW_DMA_0]; | 235 | dma_timing |= dma_modes[speed - XFER_MW_DMA_0]; |
@@ -242,6 +241,9 @@ oem_setup_failed: | |||
242 | case XFER_UDMA_2: | 241 | case XFER_UDMA_2: |
243 | case XFER_UDMA_1: | 242 | case XFER_UDMA_1: |
244 | case XFER_UDMA_0: | 243 | case XFER_UDMA_0: |
244 | /* | ||
245 | * TODO: always setup PIO mode so this won't be needed | ||
246 | */ | ||
245 | pio_timing |= pio_modes[pio]; | 247 | pio_timing |= pio_modes[pio]; |
246 | csb5_pio |= (pio << (4*drive->dn)); | 248 | csb5_pio |= (pio << (4*drive->dn)); |
247 | dma_timing |= dma_modes[2]; | 249 | dma_timing |= dma_modes[2]; |
@@ -262,72 +264,21 @@ oem_setup_failed: | |||
262 | return (ide_config_drive_speed(drive, speed)); | 264 | return (ide_config_drive_speed(drive, speed)); |
263 | } | 265 | } |
264 | 266 | ||
265 | static void config_chipset_for_pio (ide_drive_t *drive) | ||
266 | { | ||
267 | u16 eide_pio_timing[6] = {960, 480, 240, 180, 120, 90}; | ||
268 | u16 xfer_pio = drive->id->eide_pio_modes; | ||
269 | u8 timing, speed, pio; | ||
270 | |||
271 | pio = ide_get_best_pio_mode(drive, 255, 5, NULL); | ||
272 | |||
273 | if (xfer_pio > 4) | ||
274 | xfer_pio = 0; | ||
275 | |||
276 | if (drive->id->eide_pio_iordy > 0) | ||
277 | for (xfer_pio = 5; | ||
278 | xfer_pio>0 && | ||
279 | drive->id->eide_pio_iordy>eide_pio_timing[xfer_pio]; | ||
280 | xfer_pio--); | ||
281 | else | ||
282 | xfer_pio = (drive->id->eide_pio_modes & 4) ? 0x05 : | ||
283 | (drive->id->eide_pio_modes & 2) ? 0x04 : | ||
284 | (drive->id->eide_pio_modes & 1) ? 0x03 : | ||
285 | (drive->id->tPIO & 2) ? 0x02 : | ||
286 | (drive->id->tPIO & 1) ? 0x01 : xfer_pio; | ||
287 | |||
288 | timing = (xfer_pio >= pio) ? xfer_pio : pio; | ||
289 | |||
290 | switch(timing) { | ||
291 | case 4: speed = XFER_PIO_4;break; | ||
292 | case 3: speed = XFER_PIO_3;break; | ||
293 | case 2: speed = XFER_PIO_2;break; | ||
294 | case 1: speed = XFER_PIO_1;break; | ||
295 | default: | ||
296 | speed = (!drive->id->tPIO) ? XFER_PIO_0 : XFER_PIO_SLOW; | ||
297 | break; | ||
298 | } | ||
299 | (void) svwks_tune_chipset(drive, speed); | ||
300 | drive->current_speed = speed; | ||
301 | } | ||
302 | |||
303 | static void svwks_tune_drive (ide_drive_t *drive, u8 pio) | 267 | static void svwks_tune_drive (ide_drive_t *drive, u8 pio) |
304 | { | 268 | { |
305 | if(pio == 255) | 269 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); |
306 | (void) svwks_tune_chipset(drive, 255); | 270 | (void)svwks_tune_chipset(drive, XFER_PIO_0 + pio); |
307 | else | ||
308 | (void) svwks_tune_chipset(drive, (XFER_PIO_0 + pio)); | ||
309 | } | ||
310 | |||
311 | static int config_chipset_for_dma (ide_drive_t *drive) | ||
312 | { | ||
313 | u8 speed = ide_max_dma_mode(drive); | ||
314 | |||
315 | if (!(speed)) | ||
316 | speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL); | ||
317 | |||
318 | (void) svwks_tune_chipset(drive, speed); | ||
319 | return ide_dma_enable(drive); | ||
320 | } | 271 | } |
321 | 272 | ||
322 | static int svwks_config_drive_xfer_rate (ide_drive_t *drive) | 273 | static int svwks_config_drive_xfer_rate (ide_drive_t *drive) |
323 | { | 274 | { |
324 | drive->init_speed = 0; | 275 | drive->init_speed = 0; |
325 | 276 | ||
326 | if (ide_use_dma(drive) && config_chipset_for_dma(drive)) | 277 | if (ide_tune_dma(drive)) |
327 | return 0; | 278 | return 0; |
328 | 279 | ||
329 | if (ide_use_fast_pio(drive)) | 280 | if (ide_use_fast_pio(drive)) |
330 | config_chipset_for_pio(drive); | 281 | svwks_tune_drive(drive, 255); |
331 | 282 | ||
332 | return -1; | 283 | return -1; |
333 | } | 284 | } |
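The removed config_chipset_for_pio() above derived a PIO number from the drive's reported minimum IORDY cycle time using the 960/480/240/180/120/90 ns table before handing over to svwks_tune_chipset(); ide_get_best_pio_mode() now supplies the equivalent answer centrally. A standalone sketch of that cycle-time lookup only (the no-IORDY fallback via eide_pio_modes is omitted, and nothing here is kernel code):

#include <stdio.h>

/* Minimum cycle time (ns) a device must accept for PIO modes 0..5. */
static const unsigned short eide_pio_timing[6] = { 960, 480, 240, 180, 120, 90 };

/*
 * Walk down from PIO5 until the mode's cycle time is no shorter than the
 * minimum IORDY cycle time the drive reported, i.e. pick the fastest mode
 * whose timing the drive can still meet.
 */
static int pio_from_iordy_cycle(unsigned short iordy_ns)
{
	int mode;

	for (mode = 5; mode > 0; mode--)
		if (iordy_ns <= eide_pio_timing[mode])
			break;
	return mode;
}

int main(void)
{
	/* A drive advertising a 120 ns minimum IORDY cycle lands on PIO4. */
	printf("PIO mode: %d\n", pio_from_iordy_cycle(120));
	return 0;
}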
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index d09e74c2996e..1a4444e7226a 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c | |||
@@ -375,28 +375,6 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed) | |||
375 | } | 375 | } |
376 | 376 | ||
377 | /** | 377 | /** |
378 | * config_chipset_for_dma - configure for DMA | ||
379 | * @drive: drive to configure | ||
380 | * | ||
381 | * Called by the IDE layer when it wants the timings set up. | ||
382 | * For the CMD680 we also need to set up the PIO timings and | ||
383 | * enable DMA. | ||
384 | */ | ||
385 | |||
386 | static int config_chipset_for_dma (ide_drive_t *drive) | ||
387 | { | ||
388 | u8 speed = ide_max_dma_mode(drive); | ||
389 | |||
390 | if (!speed) | ||
391 | return 0; | ||
392 | |||
393 | if (siimage_tune_chipset(drive, speed)) | ||
394 | return 0; | ||
395 | |||
396 | return ide_dma_enable(drive); | ||
397 | } | ||
398 | |||
399 | /** | ||
400 | * siimage_configure_drive_for_dma - set up for DMA transfers | 378 | * siimage_configure_drive_for_dma - set up for DMA transfers |
401 | * @drive: drive we are going to set up | 379 | * @drive: drive we are going to set up |
402 | * | 380 | * |
@@ -408,7 +386,7 @@ static int config_chipset_for_dma (ide_drive_t *drive) | |||
408 | 386 | ||
409 | static int siimage_config_drive_for_dma (ide_drive_t *drive) | 387 | static int siimage_config_drive_for_dma (ide_drive_t *drive) |
410 | { | 388 | { |
411 | if (ide_use_dma(drive) && config_chipset_for_dma(drive)) | 389 | if (ide_tune_dma(drive)) |
412 | return 0; | 390 | return 0; |
413 | 391 | ||
414 | if (ide_use_fast_pio(drive)) | 392 | if (ide_use_fast_pio(drive)) |
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 2bde1b92784a..bb6cc4aedd63 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c | |||
@@ -1,9 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/sis5513.c Version 0.16ac+vp Jun 18, 2003 | 2 | * linux/drivers/ide/pci/sis5513.c Version 0.20 Mar 4, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 4 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
5 | * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer | 5 | * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer |
6 | * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> | 6 | * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> |
7 | * Copyright (C) 2007 Bartlomiej Zolnierkiewicz | ||
8 | * | ||
7 | * May be copied or modified under the terms of the GNU General Public License | 9 | * May be copied or modified under the terms of the GNU General Public License |
8 | * | 10 | * |
9 | * | 11 | * |
@@ -448,36 +450,15 @@ static void config_drive_art_rwp (ide_drive_t *drive) | |||
448 | pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); | 450 | pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); |
449 | } | 451 | } |
450 | 452 | ||
451 | |||
452 | /* Set per-drive active and recovery time */ | 453 | /* Set per-drive active and recovery time */ |
453 | static void config_art_rwp_pio (ide_drive_t *drive, u8 pio) | 454 | static void config_art_rwp_pio (ide_drive_t *drive, u8 pio) |
454 | { | 455 | { |
455 | ide_hwif_t *hwif = HWIF(drive); | 456 | ide_hwif_t *hwif = HWIF(drive); |
456 | struct pci_dev *dev = hwif->pci_dev; | 457 | struct pci_dev *dev = hwif->pci_dev; |
457 | 458 | ||
458 | u8 timing, drive_pci, test1, test2; | 459 | u8 drive_pci, test1, test2; |
459 | |||
460 | u16 eide_pio_timing[6] = {600, 390, 240, 180, 120, 90}; | ||
461 | u16 xfer_pio = drive->id->eide_pio_modes; | ||
462 | 460 | ||
463 | config_drive_art_rwp(drive); | 461 | config_drive_art_rwp(drive); |
464 | pio = ide_get_best_pio_mode(drive, 255, pio, NULL); | ||
465 | |||
466 | if (xfer_pio> 4) | ||
467 | xfer_pio = 0; | ||
468 | |||
469 | if (drive->id->eide_pio_iordy > 0) { | ||
470 | for (xfer_pio = 5; | ||
471 | (xfer_pio > 0) && | ||
472 | (drive->id->eide_pio_iordy > eide_pio_timing[xfer_pio]); | ||
473 | xfer_pio--); | ||
474 | } else { | ||
475 | xfer_pio = (drive->id->eide_pio_modes & 4) ? 0x05 : | ||
476 | (drive->id->eide_pio_modes & 2) ? 0x04 : | ||
477 | (drive->id->eide_pio_modes & 1) ? 0x03 : xfer_pio; | ||
478 | } | ||
479 | |||
480 | timing = (xfer_pio >= pio) ? xfer_pio : pio; | ||
481 | 462 | ||
482 | /* In pre ATA_133 case, drives sit at 0x40 + 4*drive->dn */ | 463 | /* In pre ATA_133 case, drives sit at 0x40 + 4*drive->dn */ |
483 | drive_pci = 0x40; | 464 | drive_pci = 0x40; |
@@ -500,17 +481,18 @@ static void config_art_rwp_pio (ide_drive_t *drive, u8 pio) | |||
500 | test1 &= ~0x0F; | 481 | test1 &= ~0x0F; |
501 | test2 &= ~0x07; | 482 | test2 &= ~0x07; |
502 | 483 | ||
503 | switch(timing) { | 484 | switch(pio) { |
504 | case 4: test1 |= 0x01; test2 |= 0x03; break; | 485 | case 4: test1 |= 0x01; test2 |= 0x03; break; |
505 | case 3: test1 |= 0x03; test2 |= 0x03; break; | 486 | case 3: test1 |= 0x03; test2 |= 0x03; break; |
506 | case 2: test1 |= 0x04; test2 |= 0x04; break; | 487 | case 2: test1 |= 0x04; test2 |= 0x04; break; |
507 | case 1: test1 |= 0x07; test2 |= 0x06; break; | 488 | case 1: test1 |= 0x07; test2 |= 0x06; break; |
489 | case 0: /* PIO0: register setting == X000 */ | ||
508 | default: break; | 490 | default: break; |
509 | } | 491 | } |
510 | pci_write_config_byte(dev, drive_pci, test1); | 492 | pci_write_config_byte(dev, drive_pci, test1); |
511 | pci_write_config_byte(dev, drive_pci+1, test2); | 493 | pci_write_config_byte(dev, drive_pci+1, test2); |
512 | } else if (chipset_family < ATA_133) { | 494 | } else if (chipset_family < ATA_133) { |
513 | switch(timing) { /* active recovery | 495 | switch(pio) { /* active recovery |
514 | v v */ | 496 | v v */ |
515 | case 4: test1 = 0x30|0x01; break; | 497 | case 4: test1 = 0x30|0x01; break; |
516 | case 3: test1 = 0x30|0x03; break; | 498 | case 3: test1 = 0x30|0x03; break; |
@@ -525,24 +507,28 @@ static void config_art_rwp_pio (ide_drive_t *drive, u8 pio) | |||
525 | pci_read_config_dword(dev, drive_pci, &test3); | 507 | pci_read_config_dword(dev, drive_pci, &test3); |
526 | test3 &= 0xc0c00fff; | 508 | test3 &= 0xc0c00fff; |
527 | if (test3 & 0x08) { | 509 | if (test3 & 0x08) { |
528 | test3 |= (unsigned long)ini_time_value[ATA_133][timing] << 12; | 510 | test3 |= ini_time_value[ATA_133][pio] << 12; |
529 | test3 |= (unsigned long)act_time_value[ATA_133][timing] << 16; | 511 | test3 |= act_time_value[ATA_133][pio] << 16; |
530 | test3 |= (unsigned long)rco_time_value[ATA_133][timing] << 24; | 512 | test3 |= rco_time_value[ATA_133][pio] << 24; |
531 | } else { | 513 | } else { |
532 | test3 |= (unsigned long)ini_time_value[ATA_100][timing] << 12; | 514 | test3 |= ini_time_value[ATA_100][pio] << 12; |
533 | test3 |= (unsigned long)act_time_value[ATA_100][timing] << 16; | 515 | test3 |= act_time_value[ATA_100][pio] << 16; |
534 | test3 |= (unsigned long)rco_time_value[ATA_100][timing] << 24; | 516 | test3 |= rco_time_value[ATA_100][pio] << 24; |
535 | } | 517 | } |
536 | pci_write_config_dword(dev, drive_pci, test3); | 518 | pci_write_config_dword(dev, drive_pci, test3); |
537 | } | 519 | } |
538 | } | 520 | } |
539 | 521 | ||
540 | static int config_chipset_for_pio (ide_drive_t *drive, u8 pio) | 522 | static int sis5513_tune_drive(ide_drive_t *drive, u8 pio) |
541 | { | 523 | { |
542 | if (pio == 255) | 524 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); |
543 | pio = ide_find_best_mode(drive, XFER_PIO | XFER_EPIO) - XFER_PIO_0; | ||
544 | config_art_rwp_pio(drive, pio); | 525 | config_art_rwp_pio(drive, pio); |
545 | return ide_config_drive_speed(drive, XFER_PIO_0 + min_t(u8, pio, 4)); | 526 | return ide_config_drive_speed(drive, XFER_PIO_0 + pio); |
527 | } | ||
528 | |||
529 | static void sis5513_tuneproc(ide_drive_t *drive, u8 pio) | ||
530 | { | ||
531 | (void)sis5513_tune_drive(drive, pio); | ||
546 | } | 532 | } |
547 | 533 | ||
548 | static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed) | 534 | static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed) |
@@ -622,25 +608,26 @@ static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
622 | case XFER_SW_DMA_1: | 608 | case XFER_SW_DMA_1: |
623 | case XFER_SW_DMA_0: | 609 | case XFER_SW_DMA_0: |
624 | break; | 610 | break; |
625 | case XFER_PIO_4: return((int) config_chipset_for_pio(drive, 4)); | 611 | case XFER_PIO_4: |
626 | case XFER_PIO_3: return((int) config_chipset_for_pio(drive, 3)); | 612 | case XFER_PIO_3: |
627 | case XFER_PIO_2: return((int) config_chipset_for_pio(drive, 2)); | 613 | case XFER_PIO_2: |
628 | case XFER_PIO_1: return((int) config_chipset_for_pio(drive, 1)); | 614 | case XFER_PIO_1: |
629 | case XFER_PIO_0: | 615 | case XFER_PIO_0: |
630 | default: return((int) config_chipset_for_pio(drive, 0)); | 616 | return sis5513_tune_drive(drive, speed - XFER_PIO_0); |
617 | default: | ||
618 | BUG(); | ||
619 | break; | ||
631 | } | 620 | } |
632 | 621 | ||
633 | return ((int) ide_config_drive_speed(drive, speed)); | 622 | return ide_config_drive_speed(drive, speed); |
634 | } | ||
635 | |||
636 | static void sis5513_tune_drive (ide_drive_t *drive, u8 pio) | ||
637 | { | ||
638 | (void) config_chipset_for_pio(drive, pio); | ||
639 | } | 623 | } |
640 | 624 | ||
641 | static int sis5513_config_xfer_rate(ide_drive_t *drive) | 625 | static int sis5513_config_xfer_rate(ide_drive_t *drive) |
642 | { | 626 | { |
643 | config_art_rwp_pio(drive, 5); | 627 | /* |
628 | * TODO: always set PIO mode and remove this | ||
629 | */ | ||
630 | sis5513_tuneproc(drive, 255); | ||
644 | 631 | ||
645 | drive->init_speed = 0; | 632 | drive->init_speed = 0; |
646 | 633 | ||
@@ -648,7 +635,7 @@ static int sis5513_config_xfer_rate(ide_drive_t *drive) | |||
648 | return 0; | 635 | return 0; |
649 | 636 | ||
650 | if (ide_use_fast_pio(drive)) | 637 | if (ide_use_fast_pio(drive)) |
651 | sis5513_tune_drive(drive, 5); | 638 | sis5513_tuneproc(drive, 255); |
652 | 639 | ||
653 | return -1; | 640 | return -1; |
654 | } | 641 | } |
@@ -836,7 +823,7 @@ static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif) | |||
836 | if (!hwif->irq) | 823 | if (!hwif->irq) |
837 | hwif->irq = hwif->channel ? 15 : 14; | 824 | hwif->irq = hwif->channel ? 15 : 14; |
838 | 825 | ||
839 | hwif->tuneproc = &sis5513_tune_drive; | 826 | hwif->tuneproc = &sis5513_tuneproc; |
840 | hwif->speedproc = &sis5513_tune_chipset; | 827 | hwif->speedproc = &sis5513_tune_chipset; |
841 | 828 | ||
842 | if (!(hwif->dma_base)) { | 829 | if (!(hwif->dma_base)) { |
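For the ATA_133-capable chips, sis5513 packs the per-drive PIO timing into one PCI config dword: the register image is masked with 0xc0c00fff and the initial/active/recovery values for the chosen mode are OR-ed in at bits 12, 16 and 24. A sketch of that packing with invented timing tables (the driver's real ini/act/rco tables are not reproduced here):

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only; the driver's real tables depend on the bus clock. */
static const uint8_t ini_time[5] = { 12, 10, 8, 4, 2 };
static const uint8_t act_time[5] = { 14, 12, 8, 6, 4 };
static const uint8_t rco_time[5] = { 14, 10, 8, 4, 2 };

/* Fold the three timing fields for 'pio' into the existing register image. */
static uint32_t pack_sis_pio_timing(uint32_t reg, unsigned int pio)
{
	reg &= 0xc0c00fffu;                  /* clear the three timing fields */
	reg |= (uint32_t)ini_time[pio] << 12;
	reg |= (uint32_t)act_time[pio] << 16;
	reg |= (uint32_t)rco_time[pio] << 24;
	return reg;
}

int main(void)
{
	printf("0x%08x\n", pack_sis_pio_timing(0xffffffffu, 4));
	return 0;
}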
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c index fe3b4b91f854..7c383d9cc472 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/pci/sl82c105.c | |||
@@ -82,7 +82,14 @@ static u8 sl82c105_tune_pio(ide_drive_t *drive, u8 pio) | |||
82 | 82 | ||
83 | pio = ide_get_best_pio_mode(drive, pio, 5, &p); | 83 | pio = ide_get_best_pio_mode(drive, pio, 5, &p); |
84 | 84 | ||
85 | drive->drive_data = drv_ctrl = get_pio_timings(&p); | 85 | drv_ctrl = get_pio_timings(&p); |
86 | |||
87 | /* | ||
88 | * Store the PIO timings so that we can restore them | ||
89 | * in case DMA will be turned off... | ||
90 | */ | ||
91 | drive->drive_data &= 0xffff0000; | ||
92 | drive->drive_data |= drv_ctrl; | ||
86 | 93 | ||
87 | if (!drive->using_dma) { | 94 | if (!drive->using_dma) { |
88 | /* | 95 | /* |
@@ -100,17 +107,55 @@ static u8 sl82c105_tune_pio(ide_drive_t *drive, u8 pio) | |||
100 | } | 107 | } |
101 | 108 | ||
102 | /* | 109 | /* |
103 | * Configure the drive for DMA. | 110 | * Configure the drive and chipset for a new transfer speed. |
104 | * We'll program the chipset only when DMA is actually turned on. | ||
105 | */ | 111 | */ |
106 | static int config_for_dma(ide_drive_t *drive) | 112 | static int sl82c105_tune_chipset(ide_drive_t *drive, u8 speed) |
107 | { | 113 | { |
108 | DBG(("config_for_dma(drive:%s)\n", drive->name)); | 114 | static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200}; |
115 | u16 drv_ctrl; | ||
109 | 116 | ||
110 | if (ide_config_drive_speed(drive, XFER_MW_DMA_2) != 0) | 117 | DBG(("sl82c105_tune_chipset(drive:%s, speed:%s)\n", |
111 | return 0; | 118 | drive->name, ide_xfer_verbose(speed))); |
112 | 119 | ||
113 | return ide_dma_enable(drive); | 120 | speed = ide_rate_filter(drive, speed); |
121 | |||
122 | switch (speed) { | ||
123 | case XFER_MW_DMA_2: | ||
124 | case XFER_MW_DMA_1: | ||
125 | case XFER_MW_DMA_0: | ||
126 | drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0]; | ||
127 | |||
128 | /* | ||
129 | * Store the DMA timings so that we can actually program | ||
130 | * them when DMA will be turned on... | ||
131 | */ | ||
132 | drive->drive_data &= 0x0000ffff; | ||
133 | drive->drive_data |= (unsigned long)drv_ctrl << 16; | ||
134 | |||
135 | /* | ||
136 | * If we are already using DMA, we just reprogram | ||
137 | * the drive control register. | ||
138 | */ | ||
139 | if (drive->using_dma) { | ||
140 | struct pci_dev *dev = HWIF(drive)->pci_dev; | ||
141 | int reg = 0x44 + drive->dn * 4; | ||
142 | |||
143 | pci_write_config_word(dev, reg, drv_ctrl); | ||
144 | } | ||
145 | break; | ||
146 | case XFER_PIO_5: | ||
147 | case XFER_PIO_4: | ||
148 | case XFER_PIO_3: | ||
149 | case XFER_PIO_2: | ||
150 | case XFER_PIO_1: | ||
151 | case XFER_PIO_0: | ||
152 | (void) sl82c105_tune_pio(drive, speed - XFER_PIO_0); | ||
153 | break; | ||
154 | default: | ||
155 | return -1; | ||
156 | } | ||
157 | |||
158 | return ide_config_drive_speed(drive, speed); | ||
114 | } | 159 | } |
115 | 160 | ||
116 | /* | 161 | /* |
@@ -120,7 +165,7 @@ static int sl82c105_ide_dma_check(ide_drive_t *drive) | |||
120 | { | 165 | { |
121 | DBG(("sl82c105_ide_dma_check(drive:%s)\n", drive->name)); | 166 | DBG(("sl82c105_ide_dma_check(drive:%s)\n", drive->name)); |
122 | 167 | ||
123 | if (ide_use_dma(drive) && config_for_dma(drive)) | 168 | if (ide_tune_dma(drive)) |
124 | return 0; | 169 | return 0; |
125 | 170 | ||
126 | return -1; | 171 | return -1; |
@@ -219,7 +264,7 @@ static int sl82c105_ide_dma_on(ide_drive_t *drive) | |||
219 | 264 | ||
220 | rc = __ide_dma_on(drive); | 265 | rc = __ide_dma_on(drive); |
221 | if (rc == 0) { | 266 | if (rc == 0) { |
222 | pci_write_config_word(dev, reg, 0x0200); | 267 | pci_write_config_word(dev, reg, drive->drive_data >> 16); |
223 | 268 | ||
224 | printk(KERN_INFO "%s: DMA enabled\n", drive->name); | 269 | printk(KERN_INFO "%s: DMA enabled\n", drive->name); |
225 | } | 270 | } |
@@ -304,7 +349,7 @@ static unsigned int sl82c105_bridge_revision(struct pci_dev *dev) | |||
304 | /* | 349 | /* |
305 | * The bridge should be part of the same device, but function 0. | 350 | * The bridge should be part of the same device, but function 0. |
306 | */ | 351 | */ |
307 | bridge = pci_find_slot(dev->bus->number, | 352 | bridge = pci_get_bus_and_slot(dev->bus->number, |
308 | PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); | 353 | PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); |
309 | if (!bridge) | 354 | if (!bridge) |
310 | return -1; | 355 | return -1; |
@@ -314,13 +359,15 @@ static unsigned int sl82c105_bridge_revision(struct pci_dev *dev) | |||
314 | */ | 359 | */ |
315 | if (bridge->vendor != PCI_VENDOR_ID_WINBOND || | 360 | if (bridge->vendor != PCI_VENDOR_ID_WINBOND || |
316 | bridge->device != PCI_DEVICE_ID_WINBOND_83C553 || | 361 | bridge->device != PCI_DEVICE_ID_WINBOND_83C553 || |
317 | bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) | 362 | bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) { |
363 | pci_dev_put(bridge); | ||
318 | return -1; | 364 | return -1; |
319 | 365 | } | |
320 | /* | 366 | /* |
321 | * We need to find function 0's revision, not function 1 | 367 | * We need to find function 0's revision, not function 1 |
322 | */ | 368 | */ |
323 | pci_read_config_byte(bridge, PCI_REVISION_ID, &rev); | 369 | pci_read_config_byte(bridge, PCI_REVISION_ID, &rev); |
370 | pci_dev_put(bridge); | ||
324 | 371 | ||
325 | return rev; | 372 | return rev; |
326 | } | 373 | } |
@@ -357,6 +404,7 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) | |||
357 | DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index)); | 404 | DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index)); |
358 | 405 | ||
359 | hwif->tuneproc = &sl82c105_tune_drive; | 406 | hwif->tuneproc = &sl82c105_tune_drive; |
407 | hwif->speedproc = &sl82c105_tune_chipset; | ||
360 | hwif->selectproc = &sl82c105_selectproc; | 408 | hwif->selectproc = &sl82c105_selectproc; |
361 | hwif->resetproc = &sl82c105_resetproc; | 409 | hwif->resetproc = &sl82c105_resetproc; |
362 | 410 | ||
@@ -388,7 +436,7 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) | |||
388 | } | 436 | } |
389 | 437 | ||
390 | hwif->atapi_dma = 1; | 438 | hwif->atapi_dma = 1; |
391 | hwif->mwdma_mask = 0x04; | 439 | hwif->mwdma_mask = 0x07; |
392 | 440 | ||
393 | hwif->ide_dma_check = &sl82c105_ide_dma_check; | 441 | hwif->ide_dma_check = &sl82c105_ide_dma_check; |
394 | hwif->ide_dma_on = &sl82c105_ide_dma_on; | 442 | hwif->ide_dma_on = &sl82c105_ide_dma_on; |
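sl82c105 now keeps both timing words in drive->drive_data, PIO control bits in the low half and MWDMA control bits in the high half, so sl82c105_ide_dma_on() can program drive_data >> 16 while a later switch back to PIO can restore the low half. A standalone sketch of that 16/16 split (the PIO value below is invented; only 0x0200 matches the driver's mwdma_timings entry for MW_DMA_2):

#include <stdio.h>
#include <stdint.h>

/* Keep PIO timings in the low half, MWDMA timings in the high half. */
static void store_pio(uint32_t *drive_data, uint16_t pio_ctrl)
{
	*drive_data = (*drive_data & 0xffff0000u) | pio_ctrl;
}

static void store_mwdma(uint32_t *drive_data, uint16_t dma_ctrl)
{
	*drive_data = (*drive_data & 0x0000ffffu) | ((uint32_t)dma_ctrl << 16);
}

int main(void)
{
	uint32_t drive_data = 0;

	store_pio(&drive_data, 0x0409);      /* stand-in for a get_pio_timings() result */
	store_mwdma(&drive_data, 0x0200);    /* mwdma_timings[XFER_MW_DMA_2 - XFER_MW_DMA_0] */

	printf("program now: 0x%04x, on dma_on: 0x%04x\n",
	       (unsigned)(drive_data & 0xffff), (unsigned)(drive_data >> 16));
	return 0;
}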
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index fde92ce45153..2eb52b7a71da 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -346,12 +346,33 @@ static void cma_deref_id(struct rdma_id_private *id_priv) | |||
346 | complete(&id_priv->comp); | 346 | complete(&id_priv->comp); |
347 | } | 347 | } |
348 | 348 | ||
349 | static void cma_release_remove(struct rdma_id_private *id_priv) | 349 | static int cma_disable_remove(struct rdma_id_private *id_priv, |
350 | enum cma_state state) | ||
351 | { | ||
352 | unsigned long flags; | ||
353 | int ret; | ||
354 | |||
355 | spin_lock_irqsave(&id_priv->lock, flags); | ||
356 | if (id_priv->state == state) { | ||
357 | atomic_inc(&id_priv->dev_remove); | ||
358 | ret = 0; | ||
359 | } else | ||
360 | ret = -EINVAL; | ||
361 | spin_unlock_irqrestore(&id_priv->lock, flags); | ||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | static void cma_enable_remove(struct rdma_id_private *id_priv) | ||
350 | { | 366 | { |
351 | if (atomic_dec_and_test(&id_priv->dev_remove)) | 367 | if (atomic_dec_and_test(&id_priv->dev_remove)) |
352 | wake_up(&id_priv->wait_remove); | 368 | wake_up(&id_priv->wait_remove); |
353 | } | 369 | } |
354 | 370 | ||
371 | static int cma_has_cm_dev(struct rdma_id_private *id_priv) | ||
372 | { | ||
373 | return (id_priv->id.device && id_priv->cm_id.ib); | ||
374 | } | ||
375 | |||
355 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, | 376 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, |
356 | void *context, enum rdma_port_space ps) | 377 | void *context, enum rdma_port_space ps) |
357 | { | 378 | { |
@@ -884,9 +905,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
884 | struct rdma_cm_event event; | 905 | struct rdma_cm_event event; |
885 | int ret = 0; | 906 | int ret = 0; |
886 | 907 | ||
887 | atomic_inc(&id_priv->dev_remove); | 908 | if (cma_disable_remove(id_priv, CMA_CONNECT)) |
888 | if (!cma_comp(id_priv, CMA_CONNECT)) | 909 | return 0; |
889 | goto out; | ||
890 | 910 | ||
891 | memset(&event, 0, sizeof event); | 911 | memset(&event, 0, sizeof event); |
892 | switch (ib_event->event) { | 912 | switch (ib_event->event) { |
@@ -942,12 +962,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
942 | /* Destroy the CM ID by returning a non-zero value. */ | 962 | /* Destroy the CM ID by returning a non-zero value. */ |
943 | id_priv->cm_id.ib = NULL; | 963 | id_priv->cm_id.ib = NULL; |
944 | cma_exch(id_priv, CMA_DESTROYING); | 964 | cma_exch(id_priv, CMA_DESTROYING); |
945 | cma_release_remove(id_priv); | 965 | cma_enable_remove(id_priv); |
946 | rdma_destroy_id(&id_priv->id); | 966 | rdma_destroy_id(&id_priv->id); |
947 | return ret; | 967 | return ret; |
948 | } | 968 | } |
949 | out: | 969 | out: |
950 | cma_release_remove(id_priv); | 970 | cma_enable_remove(id_priv); |
951 | return ret; | 971 | return ret; |
952 | } | 972 | } |
953 | 973 | ||
@@ -1057,11 +1077,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1057 | int offset, ret; | 1077 | int offset, ret; |
1058 | 1078 | ||
1059 | listen_id = cm_id->context; | 1079 | listen_id = cm_id->context; |
1060 | atomic_inc(&listen_id->dev_remove); | 1080 | if (cma_disable_remove(listen_id, CMA_LISTEN)) |
1061 | if (!cma_comp(listen_id, CMA_LISTEN)) { | 1081 | return -ECONNABORTED; |
1062 | ret = -ECONNABORTED; | ||
1063 | goto out; | ||
1064 | } | ||
1065 | 1082 | ||
1066 | memset(&event, 0, sizeof event); | 1083 | memset(&event, 0, sizeof event); |
1067 | offset = cma_user_data_offset(listen_id->id.ps); | 1084 | offset = cma_user_data_offset(listen_id->id.ps); |
@@ -1101,11 +1118,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1101 | 1118 | ||
1102 | release_conn_id: | 1119 | release_conn_id: |
1103 | cma_exch(conn_id, CMA_DESTROYING); | 1120 | cma_exch(conn_id, CMA_DESTROYING); |
1104 | cma_release_remove(conn_id); | 1121 | cma_enable_remove(conn_id); |
1105 | rdma_destroy_id(&conn_id->id); | 1122 | rdma_destroy_id(&conn_id->id); |
1106 | 1123 | ||
1107 | out: | 1124 | out: |
1108 | cma_release_remove(listen_id); | 1125 | cma_enable_remove(listen_id); |
1109 | return ret; | 1126 | return ret; |
1110 | } | 1127 | } |
1111 | 1128 | ||
@@ -1171,9 +1188,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
1171 | struct sockaddr_in *sin; | 1188 | struct sockaddr_in *sin; |
1172 | int ret = 0; | 1189 | int ret = 0; |
1173 | 1190 | ||
1174 | memset(&event, 0, sizeof event); | 1191 | if (cma_disable_remove(id_priv, CMA_CONNECT)) |
1175 | atomic_inc(&id_priv->dev_remove); | 1192 | return 0; |
1176 | 1193 | ||
1194 | memset(&event, 0, sizeof event); | ||
1177 | switch (iw_event->event) { | 1195 | switch (iw_event->event) { |
1178 | case IW_CM_EVENT_CLOSE: | 1196 | case IW_CM_EVENT_CLOSE: |
1179 | event.event = RDMA_CM_EVENT_DISCONNECTED; | 1197 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
@@ -1214,12 +1232,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
1214 | /* Destroy the CM ID by returning a non-zero value. */ | 1232 | /* Destroy the CM ID by returning a non-zero value. */ |
1215 | id_priv->cm_id.iw = NULL; | 1233 | id_priv->cm_id.iw = NULL; |
1216 | cma_exch(id_priv, CMA_DESTROYING); | 1234 | cma_exch(id_priv, CMA_DESTROYING); |
1217 | cma_release_remove(id_priv); | 1235 | cma_enable_remove(id_priv); |
1218 | rdma_destroy_id(&id_priv->id); | 1236 | rdma_destroy_id(&id_priv->id); |
1219 | return ret; | 1237 | return ret; |
1220 | } | 1238 | } |
1221 | 1239 | ||
1222 | cma_release_remove(id_priv); | 1240 | cma_enable_remove(id_priv); |
1223 | return ret; | 1241 | return ret; |
1224 | } | 1242 | } |
1225 | 1243 | ||
@@ -1234,11 +1252,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
1234 | int ret; | 1252 | int ret; |
1235 | 1253 | ||
1236 | listen_id = cm_id->context; | 1254 | listen_id = cm_id->context; |
1237 | atomic_inc(&listen_id->dev_remove); | 1255 | if (cma_disable_remove(listen_id, CMA_LISTEN)) |
1238 | if (!cma_comp(listen_id, CMA_LISTEN)) { | 1256 | return -ECONNABORTED; |
1239 | ret = -ECONNABORTED; | ||
1240 | goto out; | ||
1241 | } | ||
1242 | 1257 | ||
1243 | /* Create a new RDMA id for the new IW CM ID */ | 1258 | /* Create a new RDMA id for the new IW CM ID */ |
1244 | new_cm_id = rdma_create_id(listen_id->id.event_handler, | 1259 | new_cm_id = rdma_create_id(listen_id->id.event_handler, |
@@ -1255,13 +1270,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
1255 | dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr); | 1270 | dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr); |
1256 | if (!dev) { | 1271 | if (!dev) { |
1257 | ret = -EADDRNOTAVAIL; | 1272 | ret = -EADDRNOTAVAIL; |
1258 | cma_release_remove(conn_id); | 1273 | cma_enable_remove(conn_id); |
1259 | rdma_destroy_id(new_cm_id); | 1274 | rdma_destroy_id(new_cm_id); |
1260 | goto out; | 1275 | goto out; |
1261 | } | 1276 | } |
1262 | ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); | 1277 | ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); |
1263 | if (ret) { | 1278 | if (ret) { |
1264 | cma_release_remove(conn_id); | 1279 | cma_enable_remove(conn_id); |
1265 | rdma_destroy_id(new_cm_id); | 1280 | rdma_destroy_id(new_cm_id); |
1266 | goto out; | 1281 | goto out; |
1267 | } | 1282 | } |
@@ -1270,7 +1285,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
1270 | ret = cma_acquire_dev(conn_id); | 1285 | ret = cma_acquire_dev(conn_id); |
1271 | mutex_unlock(&lock); | 1286 | mutex_unlock(&lock); |
1272 | if (ret) { | 1287 | if (ret) { |
1273 | cma_release_remove(conn_id); | 1288 | cma_enable_remove(conn_id); |
1274 | rdma_destroy_id(new_cm_id); | 1289 | rdma_destroy_id(new_cm_id); |
1275 | goto out; | 1290 | goto out; |
1276 | } | 1291 | } |
@@ -1293,14 +1308,14 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
1293 | /* User wants to destroy the CM ID */ | 1308 | /* User wants to destroy the CM ID */ |
1294 | conn_id->cm_id.iw = NULL; | 1309 | conn_id->cm_id.iw = NULL; |
1295 | cma_exch(conn_id, CMA_DESTROYING); | 1310 | cma_exch(conn_id, CMA_DESTROYING); |
1296 | cma_release_remove(conn_id); | 1311 | cma_enable_remove(conn_id); |
1297 | rdma_destroy_id(&conn_id->id); | 1312 | rdma_destroy_id(&conn_id->id); |
1298 | } | 1313 | } |
1299 | 1314 | ||
1300 | out: | 1315 | out: |
1301 | if (dev) | 1316 | if (dev) |
1302 | dev_put(dev); | 1317 | dev_put(dev); |
1303 | cma_release_remove(listen_id); | 1318 | cma_enable_remove(listen_id); |
1304 | return ret; | 1319 | return ret; |
1305 | } | 1320 | } |
1306 | 1321 | ||
@@ -1519,7 +1534,7 @@ static void cma_work_handler(struct work_struct *_work) | |||
1519 | destroy = 1; | 1534 | destroy = 1; |
1520 | } | 1535 | } |
1521 | out: | 1536 | out: |
1522 | cma_release_remove(id_priv); | 1537 | cma_enable_remove(id_priv); |
1523 | cma_deref_id(id_priv); | 1538 | cma_deref_id(id_priv); |
1524 | if (destroy) | 1539 | if (destroy) |
1525 | rdma_destroy_id(&id_priv->id); | 1540 | rdma_destroy_id(&id_priv->id); |
@@ -1711,13 +1726,13 @@ static void addr_handler(int status, struct sockaddr *src_addr, | |||
1711 | 1726 | ||
1712 | if (id_priv->id.event_handler(&id_priv->id, &event)) { | 1727 | if (id_priv->id.event_handler(&id_priv->id, &event)) { |
1713 | cma_exch(id_priv, CMA_DESTROYING); | 1728 | cma_exch(id_priv, CMA_DESTROYING); |
1714 | cma_release_remove(id_priv); | 1729 | cma_enable_remove(id_priv); |
1715 | cma_deref_id(id_priv); | 1730 | cma_deref_id(id_priv); |
1716 | rdma_destroy_id(&id_priv->id); | 1731 | rdma_destroy_id(&id_priv->id); |
1717 | return; | 1732 | return; |
1718 | } | 1733 | } |
1719 | out: | 1734 | out: |
1720 | cma_release_remove(id_priv); | 1735 | cma_enable_remove(id_priv); |
1721 | cma_deref_id(id_priv); | 1736 | cma_deref_id(id_priv); |
1722 | } | 1737 | } |
1723 | 1738 | ||
@@ -2042,11 +2057,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
2042 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; | 2057 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; |
2043 | int ret = 0; | 2058 | int ret = 0; |
2044 | 2059 | ||
2045 | memset(&event, 0, sizeof event); | 2060 | if (cma_disable_remove(id_priv, CMA_CONNECT)) |
2046 | atomic_inc(&id_priv->dev_remove); | 2061 | return 0; |
2047 | if (!cma_comp(id_priv, CMA_CONNECT)) | ||
2048 | goto out; | ||
2049 | 2062 | ||
2063 | memset(&event, 0, sizeof event); | ||
2050 | switch (ib_event->event) { | 2064 | switch (ib_event->event) { |
2051 | case IB_CM_SIDR_REQ_ERROR: | 2065 | case IB_CM_SIDR_REQ_ERROR: |
2052 | event.event = RDMA_CM_EVENT_UNREACHABLE; | 2066 | event.event = RDMA_CM_EVENT_UNREACHABLE; |
@@ -2084,12 +2098,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
2084 | /* Destroy the CM ID by returning a non-zero value. */ | 2098 | /* Destroy the CM ID by returning a non-zero value. */ |
2085 | id_priv->cm_id.ib = NULL; | 2099 | id_priv->cm_id.ib = NULL; |
2086 | cma_exch(id_priv, CMA_DESTROYING); | 2100 | cma_exch(id_priv, CMA_DESTROYING); |
2087 | cma_release_remove(id_priv); | 2101 | cma_enable_remove(id_priv); |
2088 | rdma_destroy_id(&id_priv->id); | 2102 | rdma_destroy_id(&id_priv->id); |
2089 | return ret; | 2103 | return ret; |
2090 | } | 2104 | } |
2091 | out: | 2105 | out: |
2092 | cma_release_remove(id_priv); | 2106 | cma_enable_remove(id_priv); |
2093 | return ret; | 2107 | return ret; |
2094 | } | 2108 | } |
2095 | 2109 | ||
@@ -2413,7 +2427,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) | |||
2413 | int ret; | 2427 | int ret; |
2414 | 2428 | ||
2415 | id_priv = container_of(id, struct rdma_id_private, id); | 2429 | id_priv = container_of(id, struct rdma_id_private, id); |
2416 | if (!cma_comp(id_priv, CMA_CONNECT)) | 2430 | if (!cma_has_cm_dev(id_priv)) |
2417 | return -EINVAL; | 2431 | return -EINVAL; |
2418 | 2432 | ||
2419 | switch (id->device->node_type) { | 2433 | switch (id->device->node_type) { |
@@ -2435,7 +2449,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, | |||
2435 | int ret; | 2449 | int ret; |
2436 | 2450 | ||
2437 | id_priv = container_of(id, struct rdma_id_private, id); | 2451 | id_priv = container_of(id, struct rdma_id_private, id); |
2438 | if (!cma_comp(id_priv, CMA_CONNECT)) | 2452 | if (!cma_has_cm_dev(id_priv)) |
2439 | return -EINVAL; | 2453 | return -EINVAL; |
2440 | 2454 | ||
2441 | switch (rdma_node_get_transport(id->device->node_type)) { | 2455 | switch (rdma_node_get_transport(id->device->node_type)) { |
@@ -2466,8 +2480,7 @@ int rdma_disconnect(struct rdma_cm_id *id) | |||
2466 | int ret; | 2480 | int ret; |
2467 | 2481 | ||
2468 | id_priv = container_of(id, struct rdma_id_private, id); | 2482 | id_priv = container_of(id, struct rdma_id_private, id); |
2469 | if (!cma_comp(id_priv, CMA_CONNECT) && | 2483 | if (!cma_has_cm_dev(id_priv)) |
2470 | !cma_comp(id_priv, CMA_DISCONNECT)) | ||
2471 | return -EINVAL; | 2484 | return -EINVAL; |
2472 | 2485 | ||
2473 | switch (rdma_node_get_transport(id->device->node_type)) { | 2486 | switch (rdma_node_get_transport(id->device->node_type)) { |
@@ -2499,10 +2512,9 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
2499 | int ret; | 2512 | int ret; |
2500 | 2513 | ||
2501 | id_priv = mc->id_priv; | 2514 | id_priv = mc->id_priv; |
2502 | atomic_inc(&id_priv->dev_remove); | 2515 | if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) && |
2503 | if (!cma_comp(id_priv, CMA_ADDR_BOUND) && | 2516 | cma_disable_remove(id_priv, CMA_ADDR_RESOLVED)) |
2504 | !cma_comp(id_priv, CMA_ADDR_RESOLVED)) | 2517 | return 0; |
2505 | goto out; | ||
2506 | 2518 | ||
2507 | if (!status && id_priv->id.qp) | 2519 | if (!status && id_priv->id.qp) |
2508 | status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, | 2520 | status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, |
@@ -2524,12 +2536,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
2524 | ret = id_priv->id.event_handler(&id_priv->id, &event); | 2536 | ret = id_priv->id.event_handler(&id_priv->id, &event); |
2525 | if (ret) { | 2537 | if (ret) { |
2526 | cma_exch(id_priv, CMA_DESTROYING); | 2538 | cma_exch(id_priv, CMA_DESTROYING); |
2527 | cma_release_remove(id_priv); | 2539 | cma_enable_remove(id_priv); |
2528 | rdma_destroy_id(&id_priv->id); | 2540 | rdma_destroy_id(&id_priv->id); |
2529 | return 0; | 2541 | return 0; |
2530 | } | 2542 | } |
2531 | out: | 2543 | |
2532 | cma_release_remove(id_priv); | 2544 | cma_enable_remove(id_priv); |
2533 | return 0; | 2545 | return 0; |
2534 | } | 2546 | } |
2535 | 2547 | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index f64d42b08674..1d286d3cc2d5 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -277,6 +277,7 @@ void ehca_cleanup_mrmw_cache(void); | |||
277 | 277 | ||
278 | extern spinlock_t ehca_qp_idr_lock; | 278 | extern spinlock_t ehca_qp_idr_lock; |
279 | extern spinlock_t ehca_cq_idr_lock; | 279 | extern spinlock_t ehca_cq_idr_lock; |
280 | extern spinlock_t hcall_lock; | ||
280 | extern struct idr ehca_qp_idr; | 281 | extern struct idr ehca_qp_idr; |
281 | extern struct idr ehca_cq_idr; | 282 | extern struct idr ehca_cq_idr; |
282 | 283 | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 82dda2faf4d0..100329ba3343 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -517,12 +517,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq) | |||
517 | else { | 517 | else { |
518 | struct ehca_cq *cq = eq->eqe_cache[i].cq; | 518 | struct ehca_cq *cq = eq->eqe_cache[i].cq; |
519 | comp_event_callback(cq); | 519 | comp_event_callback(cq); |
520 | spin_lock_irqsave(&ehca_cq_idr_lock, flags); | 520 | spin_lock(&ehca_cq_idr_lock); |
521 | cq->nr_events--; | 521 | cq->nr_events--; |
522 | if (!cq->nr_events) | 522 | if (!cq->nr_events) |
523 | wake_up(&cq->wait_completion); | 523 | wake_up(&cq->wait_completion); |
524 | spin_unlock_irqrestore(&ehca_cq_idr_lock, | 524 | spin_unlock(&ehca_cq_idr_lock); |
525 | flags); | ||
526 | } | 525 | } |
527 | } else { | 526 | } else { |
528 | ehca_dbg(&shca->ib_device, "Got non completion event"); | 527 | ehca_dbg(&shca->ib_device, "Got non completion event"); |
@@ -711,6 +710,7 @@ static void destroy_comp_task(struct ehca_comp_pool *pool, | |||
711 | kthread_stop(task); | 710 | kthread_stop(task); |
712 | } | 711 | } |
713 | 712 | ||
713 | #ifdef CONFIG_HOTPLUG_CPU | ||
714 | static void take_over_work(struct ehca_comp_pool *pool, | 714 | static void take_over_work(struct ehca_comp_pool *pool, |
715 | int cpu) | 715 | int cpu) |
716 | { | 716 | { |
@@ -735,7 +735,6 @@ static void take_over_work(struct ehca_comp_pool *pool, | |||
735 | 735 | ||
736 | } | 736 | } |
737 | 737 | ||
738 | #ifdef CONFIG_HOTPLUG_CPU | ||
739 | static int comp_pool_callback(struct notifier_block *nfb, | 738 | static int comp_pool_callback(struct notifier_block *nfb, |
740 | unsigned long action, | 739 | unsigned long action, |
741 | void *hcpu) | 740 | void *hcpu) |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index fe90e7454560..c3f99f33b49c 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -52,7 +52,7 @@ | |||
52 | MODULE_LICENSE("Dual BSD/GPL"); | 52 | MODULE_LICENSE("Dual BSD/GPL"); |
53 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | 53 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); |
54 | MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); | 54 | MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); |
55 | MODULE_VERSION("SVNEHCA_0022"); | 55 | MODULE_VERSION("SVNEHCA_0023"); |
56 | 56 | ||
57 | int ehca_open_aqp1 = 0; | 57 | int ehca_open_aqp1 = 0; |
58 | int ehca_debug_level = 0; | 58 | int ehca_debug_level = 0; |
@@ -62,7 +62,7 @@ int ehca_use_hp_mr = 0; | |||
62 | int ehca_port_act_time = 30; | 62 | int ehca_port_act_time = 30; |
63 | int ehca_poll_all_eqs = 1; | 63 | int ehca_poll_all_eqs = 1; |
64 | int ehca_static_rate = -1; | 64 | int ehca_static_rate = -1; |
65 | int ehca_scaling_code = 1; | 65 | int ehca_scaling_code = 0; |
66 | 66 | ||
67 | module_param_named(open_aqp1, ehca_open_aqp1, int, 0); | 67 | module_param_named(open_aqp1, ehca_open_aqp1, int, 0); |
68 | module_param_named(debug_level, ehca_debug_level, int, 0); | 68 | module_param_named(debug_level, ehca_debug_level, int, 0); |
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(scaling_code, | |||
98 | 98 | ||
99 | spinlock_t ehca_qp_idr_lock; | 99 | spinlock_t ehca_qp_idr_lock; |
100 | spinlock_t ehca_cq_idr_lock; | 100 | spinlock_t ehca_cq_idr_lock; |
101 | spinlock_t hcall_lock; | ||
101 | DEFINE_IDR(ehca_qp_idr); | 102 | DEFINE_IDR(ehca_qp_idr); |
102 | DEFINE_IDR(ehca_cq_idr); | 103 | DEFINE_IDR(ehca_cq_idr); |
103 | 104 | ||
@@ -453,15 +454,14 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp, | |||
453 | DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR, | 454 | DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR, |
454 | ehca_show_debug_level, ehca_store_debug_level); | 455 | ehca_show_debug_level, ehca_store_debug_level); |
455 | 456 | ||
456 | void ehca_create_driver_sysfs(struct ibmebus_driver *drv) | 457 | static struct attribute *ehca_drv_attrs[] = { |
457 | { | 458 | &driver_attr_debug_level.attr, |
458 | driver_create_file(&drv->driver, &driver_attr_debug_level); | 459 | NULL |
459 | } | 460 | }; |
460 | 461 | ||
461 | void ehca_remove_driver_sysfs(struct ibmebus_driver *drv) | 462 | static struct attribute_group ehca_drv_attr_grp = { |
462 | { | 463 | .attrs = ehca_drv_attrs |
463 | driver_remove_file(&drv->driver, &driver_attr_debug_level); | 464 | }; |
464 | } | ||
465 | 465 | ||
466 | #define EHCA_RESOURCE_ATTR(name) \ | 466 | #define EHCA_RESOURCE_ATTR(name) \ |
467 | static ssize_t ehca_show_##name(struct device *dev, \ | 467 | static ssize_t ehca_show_##name(struct device *dev, \ |
@@ -523,44 +523,28 @@ static ssize_t ehca_show_adapter_handle(struct device *dev, | |||
523 | } | 523 | } |
524 | static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); | 524 | static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); |
525 | 525 | ||
526 | static struct attribute *ehca_dev_attrs[] = { | ||
527 | &dev_attr_adapter_handle.attr, | ||
528 | &dev_attr_num_ports.attr, | ||
529 | &dev_attr_hw_ver.attr, | ||
530 | &dev_attr_max_eq.attr, | ||
531 | &dev_attr_cur_eq.attr, | ||
532 | &dev_attr_max_cq.attr, | ||
533 | &dev_attr_cur_cq.attr, | ||
534 | &dev_attr_max_qp.attr, | ||
535 | &dev_attr_cur_qp.attr, | ||
536 | &dev_attr_max_mr.attr, | ||
537 | &dev_attr_cur_mr.attr, | ||
538 | &dev_attr_max_mw.attr, | ||
539 | &dev_attr_cur_mw.attr, | ||
540 | &dev_attr_max_pd.attr, | ||
541 | &dev_attr_max_ah.attr, | ||
542 | NULL | ||
543 | }; | ||
526 | 544 | ||
527 | void ehca_create_device_sysfs(struct ibmebus_dev *dev) | 545 | static struct attribute_group ehca_dev_attr_grp = { |
528 | { | 546 | .attrs = ehca_dev_attrs |
529 | device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle); | 547 | }; |
530 | device_create_file(&dev->ofdev.dev, &dev_attr_num_ports); | ||
531 | device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver); | ||
532 | device_create_file(&dev->ofdev.dev, &dev_attr_max_eq); | ||
533 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq); | ||
534 | device_create_file(&dev->ofdev.dev, &dev_attr_max_cq); | ||
535 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq); | ||
536 | device_create_file(&dev->ofdev.dev, &dev_attr_max_qp); | ||
537 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp); | ||
538 | device_create_file(&dev->ofdev.dev, &dev_attr_max_mr); | ||
539 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr); | ||
540 | device_create_file(&dev->ofdev.dev, &dev_attr_max_mw); | ||
541 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw); | ||
542 | device_create_file(&dev->ofdev.dev, &dev_attr_max_pd); | ||
543 | device_create_file(&dev->ofdev.dev, &dev_attr_max_ah); | ||
544 | } | ||
545 | |||
546 | void ehca_remove_device_sysfs(struct ibmebus_dev *dev) | ||
547 | { | ||
548 | device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle); | ||
549 | device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports); | ||
550 | device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver); | ||
551 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq); | ||
552 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq); | ||
553 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq); | ||
554 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq); | ||
555 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp); | ||
556 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp); | ||
557 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr); | ||
558 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr); | ||
559 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw); | ||
560 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw); | ||
561 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd); | ||
562 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah); | ||
563 | } | ||
564 | 548 | ||
565 | static int __devinit ehca_probe(struct ibmebus_dev *dev, | 549 | static int __devinit ehca_probe(struct ibmebus_dev *dev, |
566 | const struct of_device_id *id) | 550 | const struct of_device_id *id) |
@@ -668,7 +652,10 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev, | |||
668 | } | 652 | } |
669 | } | 653 | } |
670 | 654 | ||
671 | ehca_create_device_sysfs(dev); | 655 | ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp); |
656 | if (ret) /* only complain; we can live without attributes */ | ||
657 | ehca_err(&shca->ib_device, | ||
658 | "Cannot create device attributes ret=%d", ret); | ||
672 | 659 | ||
673 | spin_lock(&shca_list_lock); | 660 | spin_lock(&shca_list_lock); |
674 | list_add(&shca->shca_list, &shca_list); | 661 | list_add(&shca->shca_list, &shca_list); |
@@ -720,7 +707,7 @@ static int __devexit ehca_remove(struct ibmebus_dev *dev) | |||
720 | struct ehca_shca *shca = dev->ofdev.dev.driver_data; | 707 | struct ehca_shca *shca = dev->ofdev.dev.driver_data; |
721 | int ret; | 708 | int ret; |
722 | 709 | ||
723 | ehca_remove_device_sysfs(dev); | 710 | sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp); |
724 | 711 | ||
725 | if (ehca_open_aqp1 == 1) { | 712 | if (ehca_open_aqp1 == 1) { |
726 | int i; | 713 | int i; |
@@ -812,11 +799,12 @@ int __init ehca_module_init(void) | |||
812 | int ret; | 799 | int ret; |
813 | 800 | ||
814 | printk(KERN_INFO "eHCA Infiniband Device Driver " | 801 | printk(KERN_INFO "eHCA Infiniband Device Driver " |
815 | "(Rel.: SVNEHCA_0022)\n"); | 802 | "(Rel.: SVNEHCA_0023)\n"); |
816 | idr_init(&ehca_qp_idr); | 803 | idr_init(&ehca_qp_idr); |
817 | idr_init(&ehca_cq_idr); | 804 | idr_init(&ehca_cq_idr); |
818 | spin_lock_init(&ehca_qp_idr_lock); | 805 | spin_lock_init(&ehca_qp_idr_lock); |
819 | spin_lock_init(&ehca_cq_idr_lock); | 806 | spin_lock_init(&ehca_cq_idr_lock); |
807 | spin_lock_init(&hcall_lock); | ||
820 | 808 | ||
821 | INIT_LIST_HEAD(&shca_list); | 809 | INIT_LIST_HEAD(&shca_list); |
822 | spin_lock_init(&shca_list_lock); | 810 | spin_lock_init(&shca_list_lock); |
@@ -838,7 +826,9 @@ int __init ehca_module_init(void) | |||
838 | goto module_init2; | 826 | goto module_init2; |
839 | } | 827 | } |
840 | 828 | ||
841 | ehca_create_driver_sysfs(&ehca_driver); | 829 | ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp); |
830 | if (ret) /* only complain; we can live without attributes */ | ||
831 | ehca_gen_err("Cannot create driver attributes ret=%d", ret); | ||
842 | 832 | ||
843 | if (ehca_poll_all_eqs != 1) { | 833 | if (ehca_poll_all_eqs != 1) { |
844 | ehca_gen_err("WARNING!!!"); | 834 | ehca_gen_err("WARNING!!!"); |
@@ -865,7 +855,7 @@ void __exit ehca_module_exit(void) | |||
865 | if (ehca_poll_all_eqs == 1) | 855 | if (ehca_poll_all_eqs == 1) |
866 | del_timer_sync(&poll_eqs_timer); | 856 | del_timer_sync(&poll_eqs_timer); |
867 | 857 | ||
868 | ehca_remove_driver_sysfs(&ehca_driver); | 858 | sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp); |
869 | ibmebus_unregister_driver(&ehca_driver); | 859 | ibmebus_unregister_driver(&ehca_driver); |
870 | 860 | ||
871 | ehca_destroy_slab_caches(); | 861 | ehca_destroy_slab_caches(); |
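
Note on the sysfs conversion above: one sysfs_create_group() call now stands in for the fifteen device_create_file() calls that ehca_probe() used to make, and it returns an error that probe() can at least report. A minimal sketch of the attribute-group pattern follows; the "example" attribute, its show routine and the value it prints are illustrative only, not part of the ehca driver.

    static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                                char *buf)
    {
            return sprintf(buf, "%d\n", 42);        /* report some device property */
    }
    static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

    static struct attribute *example_dev_attrs[] = {
            &dev_attr_example.attr,
            NULL                                    /* array must be NULL terminated */
    };

    static struct attribute_group example_dev_attr_grp = {
            .attrs = example_dev_attrs
    };

    /* probe():  if (sysfs_create_group(&dev->kobj, &example_dev_attr_grp))
     *                  dev_err(dev, "cannot create attributes\n");
     * remove(): sysfs_remove_group(&dev->kobj, &example_dev_attr_grp);
     */

The matching sysfs_remove_group() calls in ehca_remove() and ehca_module_exit() take the whole group down in one step as well.
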
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index df0516f24379..b5bc787c77b6 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -523,6 +523,8 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd, | |||
523 | goto create_qp_exit1; | 523 | goto create_qp_exit1; |
524 | } | 524 | } |
525 | 525 | ||
526 | my_qp->ib_qp.qp_num = my_qp->real_qp_num; | ||
527 | |||
526 | switch (init_attr->qp_type) { | 528 | switch (init_attr->qp_type) { |
527 | case IB_QPT_RC: | 529 | case IB_QPT_RC: |
528 | if (isdaqp == 0) { | 530 | if (isdaqp == 0) { |
@@ -568,7 +570,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd, | |||
568 | parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr; | 570 | parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr; |
569 | parms.act_nr_send_sges = init_attr->cap.max_send_sge; | 571 | parms.act_nr_send_sges = init_attr->cap.max_send_sge; |
570 | parms.act_nr_recv_sges = init_attr->cap.max_recv_sge; | 572 | parms.act_nr_recv_sges = init_attr->cap.max_recv_sge; |
571 | my_qp->real_qp_num = | 573 | my_qp->ib_qp.qp_num = |
572 | (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1; | 574 | (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1; |
573 | } | 575 | } |
574 | 576 | ||
@@ -595,7 +597,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd, | |||
595 | my_qp->ib_qp.recv_cq = init_attr->recv_cq; | 597 | my_qp->ib_qp.recv_cq = init_attr->recv_cq; |
596 | my_qp->ib_qp.send_cq = init_attr->send_cq; | 598 | my_qp->ib_qp.send_cq = init_attr->send_cq; |
597 | 599 | ||
598 | my_qp->ib_qp.qp_num = my_qp->real_qp_num; | ||
599 | my_qp->ib_qp.qp_type = init_attr->qp_type; | 600 | my_qp->ib_qp.qp_type = init_attr->qp_type; |
600 | 601 | ||
601 | my_qp->qp_type = init_attr->qp_type; | 602 | my_qp->qp_type = init_attr->qp_type; |
@@ -968,17 +969,21 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
968 | ((ehca_mult - 1) / ah_mult) : 0; | 969 | ((ehca_mult - 1) / ah_mult) : 0; |
969 | else | 970 | else |
970 | mqpcb->max_static_rate = 0; | 971 | mqpcb->max_static_rate = 0; |
971 | |||
972 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1); | 972 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1); |
973 | 973 | ||
974 | /* | 974 | /* |
975 | * Always supply the GRH flag, even if it's zero, to give the | ||
976 | * hypervisor a clear "yes" or "no" instead of a "perhaps" | ||
977 | */ | ||
978 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1); | ||
979 | |||
980 | /* | ||
975 | * only if GRH is TRUE we might consider SOURCE_GID_IDX | 981 | * only if GRH is TRUE we might consider SOURCE_GID_IDX |
976 | * and DEST_GID otherwise phype will return H_ATTR_PARM!!! | 982 | * and DEST_GID otherwise phype will return H_ATTR_PARM!!! |
977 | */ | 983 | */ |
978 | if (attr->ah_attr.ah_flags == IB_AH_GRH) { | 984 | if (attr->ah_attr.ah_flags == IB_AH_GRH) { |
979 | mqpcb->send_grh_flag = 1 << 31; | 985 | mqpcb->send_grh_flag = 1; |
980 | update_mask |= | 986 | |
981 | EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1); | ||
982 | mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index; | 987 | mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index; |
983 | update_mask |= | 988 | update_mask |= |
984 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1); | 989 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1); |
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index b564fcd3b282..7f0beec74f70 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c | |||
@@ -154,7 +154,8 @@ static long ehca_plpar_hcall9(unsigned long opcode, | |||
154 | unsigned long arg9) | 154 | unsigned long arg9) |
155 | { | 155 | { |
156 | long ret; | 156 | long ret; |
157 | int i, sleep_msecs; | 157 | int i, sleep_msecs, lock_is_set = 0; |
158 | unsigned long flags; | ||
158 | 159 | ||
159 | ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx " | 160 | ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx " |
160 | "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx", | 161 | "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx", |
@@ -162,10 +163,18 @@ static long ehca_plpar_hcall9(unsigned long opcode, | |||
162 | arg8, arg9); | 163 | arg8, arg9); |
163 | 164 | ||
164 | for (i = 0; i < 5; i++) { | 165 | for (i = 0; i < 5; i++) { |
166 | if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) { | ||
167 | spin_lock_irqsave(&hcall_lock, flags); | ||
168 | lock_is_set = 1; | ||
169 | } | ||
170 | |||
165 | ret = plpar_hcall9(opcode, outs, | 171 | ret = plpar_hcall9(opcode, outs, |
166 | arg1, arg2, arg3, arg4, arg5, | 172 | arg1, arg2, arg3, arg4, arg5, |
167 | arg6, arg7, arg8, arg9); | 173 | arg6, arg7, arg8, arg9); |
168 | 174 | ||
175 | if (lock_is_set) | ||
176 | spin_unlock_irqrestore(&hcall_lock, flags); | ||
177 | |||
169 | if (H_IS_LONG_BUSY(ret)) { | 178 | if (H_IS_LONG_BUSY(ret)) { |
170 | sleep_msecs = get_longbusy_msecs(ret); | 179 | sleep_msecs = get_longbusy_msecs(ret); |
171 | msleep_interruptible(sleep_msecs); | 180 | msleep_interruptible(sleep_msecs); |
@@ -193,11 +202,11 @@ static long ehca_plpar_hcall9(unsigned long opcode, | |||
193 | opcode, ret, outs[0], outs[1], outs[2], outs[3], | 202 | opcode, ret, outs[0], outs[1], outs[2], outs[3], |
194 | outs[4], outs[5], outs[6], outs[7], outs[8]); | 203 | outs[4], outs[5], outs[6], outs[7], outs[8]); |
195 | return ret; | 204 | return ret; |
196 | |||
197 | } | 205 | } |
198 | 206 | ||
199 | return H_BUSY; | 207 | return H_BUSY; |
200 | } | 208 | } |
209 | |||
201 | u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, | 210 | u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, |
202 | struct ehca_pfeq *pfeq, | 211 | struct ehca_pfeq *pfeq, |
203 | const u32 neq_control, | 212 | const u32 neq_control, |
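
The hcp_if.c hunk serializes one specific hypervisor call (H_ALLOC_RESOURCE with the resource type tested by arg2 == 5) under the new hcall_lock while leaving the long-busy retry loop intact. A condensed sketch of the pattern is below; fw_call() and needs_serialization() are placeholders for plpar_hcall9() and the opcode/arg2 test, they are not real kernel symbols.

    static DEFINE_SPINLOCK(fw_lock);

    static long serialized_fw_call(unsigned long opcode, unsigned long arg)
    {
            unsigned long flags;
            long ret;
            int i, locked;

            for (i = 0; i < 5; i++) {
                    locked = needs_serialization(opcode, arg);  /* hypothetical test */
                    if (locked)
                            spin_lock_irqsave(&fw_lock, flags);

                    ret = fw_call(opcode, arg);                 /* hypothetical hcall wrapper */

                    if (locked)
                            spin_unlock_irqrestore(&fw_lock, flags);

                    if (H_IS_LONG_BUSY(ret)) {
                            /* firmware asked us to back off and retry */
                            msleep_interruptible(get_longbusy_msecs(ret));
                            continue;
                    }
                    return ret;
            }
            return H_BUSY;
    }

Taking and dropping the lock inside the loop keeps msleep_interruptible() outside the critical section, which is why the lock does not simply wrap the whole retry loop.
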
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index 1b9c30857754..4e2e3dfeb2c8 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c | |||
@@ -747,7 +747,6 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | |||
747 | 747 | ||
748 | static int ipath_pe_intconfig(struct ipath_devdata *dd) | 748 | static int ipath_pe_intconfig(struct ipath_devdata *dd) |
749 | { | 749 | { |
750 | u64 val; | ||
751 | u32 chiprev; | 750 | u32 chiprev; |
752 | 751 | ||
753 | /* | 752 | /* |
@@ -760,9 +759,9 @@ static int ipath_pe_intconfig(struct ipath_devdata *dd) | |||
760 | if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { | 759 | if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { |
761 | /* Rev2+ reports extra errors via internal GPIO pins */ | 760 | /* Rev2+ reports extra errors via internal GPIO pins */ |
762 | dd->ipath_flags |= IPATH_GPIO_ERRINTRS; | 761 | dd->ipath_flags |= IPATH_GPIO_ERRINTRS; |
763 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); | 762 | dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK; |
764 | val |= IPATH_GPIO_ERRINTR_MASK; | 763 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, |
765 | ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); | 764 | dd->ipath_gpio_mask); |
766 | } | 765 | } |
767 | return 0; | 766 | return 0; |
768 | } | 767 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 45d033169c6e..a90d3b5699c4 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -1056,7 +1056,7 @@ irqreturn_t ipath_intr(int irq, void *data) | |||
1056 | gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT); | 1056 | gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT); |
1057 | chk0rcv = 1; | 1057 | chk0rcv = 1; |
1058 | } | 1058 | } |
1059 | if (unlikely(gpiostatus)) { | 1059 | if (gpiostatus) { |
1060 | /* | 1060 | /* |
1061 | * Some unexpected bits remain. If they could have | 1061 | * Some unexpected bits remain. If they could have |
1062 | * caused the interrupt, complain and clear. | 1062 | * caused the interrupt, complain and clear. |
@@ -1065,9 +1065,8 @@ irqreturn_t ipath_intr(int irq, void *data) | |||
1065 | * GPIO interrupts, possibly on a "three strikes" | 1065 | * GPIO interrupts, possibly on a "three strikes" |
1066 | * basis. | 1066 | * basis. |
1067 | */ | 1067 | */ |
1068 | u32 mask; | 1068 | const u32 mask = (u32) dd->ipath_gpio_mask; |
1069 | mask = ipath_read_kreg32( | 1069 | |
1070 | dd, dd->ipath_kregs->kr_gpio_mask); | ||
1071 | if (mask & gpiostatus) { | 1070 | if (mask & gpiostatus) { |
1072 | ipath_dbg("Unexpected GPIO IRQ bits %x\n", | 1071 | ipath_dbg("Unexpected GPIO IRQ bits %x\n", |
1073 | gpiostatus & mask); | 1072 | gpiostatus & mask); |
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index e900c2593f44..12194f3dd8cc 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -397,6 +397,8 @@ struct ipath_devdata { | |||
397 | unsigned long ipath_pioavailshadow[8]; | 397 | unsigned long ipath_pioavailshadow[8]; |
398 | /* shadow of kr_gpio_out, for rmw ops */ | 398 | /* shadow of kr_gpio_out, for rmw ops */ |
399 | u64 ipath_gpio_out; | 399 | u64 ipath_gpio_out; |
400 | /* shadow the gpio mask register */ | ||
401 | u64 ipath_gpio_mask; | ||
400 | /* kr_revision shadow */ | 402 | /* kr_revision shadow */ |
401 | u64 ipath_revision; | 403 | u64 ipath_revision; |
402 | /* | 404 | /* |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 12933e77c7e9..bb70845279b8 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -1387,13 +1387,12 @@ static int enable_timer(struct ipath_devdata *dd) | |||
1387 | * processing. | 1387 | * processing. |
1388 | */ | 1388 | */ |
1389 | if (dd->ipath_flags & IPATH_GPIO_INTR) { | 1389 | if (dd->ipath_flags & IPATH_GPIO_INTR) { |
1390 | u64 val; | ||
1391 | ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, | 1390 | ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, |
1392 | 0x2074076542310ULL); | 1391 | 0x2074076542310ULL); |
1393 | /* Enable GPIO bit 2 interrupt */ | 1392 | /* Enable GPIO bit 2 interrupt */ |
1394 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); | 1393 | dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT); |
1395 | val |= (u64) (1 << IPATH_GPIO_PORT0_BIT); | 1394 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, |
1396 | ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); | 1395 | dd->ipath_gpio_mask); |
1397 | } | 1396 | } |
1398 | 1397 | ||
1399 | init_timer(&dd->verbs_timer); | 1398 | init_timer(&dd->verbs_timer); |
@@ -1412,8 +1411,9 @@ static int disable_timer(struct ipath_devdata *dd) | |||
1412 | u64 val; | 1411 | u64 val; |
1413 | /* Disable GPIO bit 2 interrupt */ | 1412 | /* Disable GPIO bit 2 interrupt */ |
1414 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); | 1413 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); |
1415 | val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT)); | 1414 | dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT)); |
1416 | ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); | 1415 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, |
1416 | dd->ipath_gpio_mask); | ||
1417 | /* | 1417 | /* |
1418 | * We might want to undo changes to debugportselect, | 1418 | * We might want to undo changes to debugportselect, |
1419 | * but how? | 1419 | * but how? |
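
All four ipath hunks above implement one idea: kr_gpio_mask gets a software shadow (dd->ipath_gpio_mask), the enable/disable paths do read-modify-write on the shadow, and the interrupt handler compares against the shadow instead of reading the register back on the hot path. A generic sketch of the idiom; shadow, write_reg() and REG_MASK are illustrative names, not ipath symbols, and callers are assumed not to race on the shadow.

    static u64 shadow;                      /* mirrors the last value written */

    static void mask_enable_bit(unsigned int bit)
    {
            shadow |= 1ULL << bit;          /* update the mirror first ...  */
            write_reg(REG_MASK, shadow);    /* ... then push it to hardware */
    }

    static void mask_disable_bit(unsigned int bit)
    {
            shadow &= ~(1ULL << bit);
            write_reg(REG_MASK, shadow);
    }

    /* hot path: which of these status bits did we actually enable? */
    static u32 enabled_bits(u32 status)
    {
            return status & (u32) shadow;   /* no MMIO read needed */
    }
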
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 688ecb4c39f3..402f3a20ec0a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -489,6 +489,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
489 | ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); | 489 | ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); |
490 | if (!ibdev->uar_map) | 490 | if (!ibdev->uar_map) |
491 | goto err_uar; | 491 | goto err_uar; |
492 | MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); | ||
492 | 493 | ||
493 | INIT_LIST_HEAD(&ibdev->pgdir_list); | 494 | INIT_LIST_HEAD(&ibdev->pgdir_list); |
494 | mutex_init(&ibdev->pgdir_mutex); | 495 | mutex_init(&ibdev->pgdir_mutex); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index cf0868f6e965..ca224d018af2 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c | |||
@@ -284,7 +284,7 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, | |||
284 | { | 284 | { |
285 | struct mthca_cqe *cqe; | 285 | struct mthca_cqe *cqe; |
286 | u32 prod_index; | 286 | u32 prod_index; |
287 | int nfreed = 0; | 287 | int i, nfreed = 0; |
288 | 288 | ||
289 | spin_lock_irq(&cq->lock); | 289 | spin_lock_irq(&cq->lock); |
290 | 290 | ||
@@ -321,6 +321,8 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, | |||
321 | } | 321 | } |
322 | 322 | ||
323 | if (nfreed) { | 323 | if (nfreed) { |
324 | for (i = 0; i < nfreed; ++i) | ||
325 | set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); | ||
324 | wmb(); | 326 | wmb(); |
325 | cq->cons_index += nfreed; | 327 | cq->cons_index += nfreed; |
326 | update_cons_index(dev, cq, nfreed); | 328 | update_cons_index(dev, cq, nfreed); |
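
The mthca_cq.c hunk adds one step to CQ cleaning: each CQE freed by the loop is handed back to hardware ownership before the consumer index moves past it, so a stale, still software-owned entry cannot later be mistaken for a valid completion. Condensed, using mthca's own helpers exactly as they appear in the hunk:

    if (nfreed) {
            for (i = 0; i < nfreed; ++i)
                    set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
            wmb();          /* CQE ownership updates before the index move */
            cq->cons_index += nfreed;
            update_cons_index(dev, cq, nfreed);
    }
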
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index fee60c852d14..72fabb822f1c 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1862,6 +1862,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1862 | dev->kar + MTHCA_RECEIVE_DOORBELL, | 1862 | dev->kar + MTHCA_RECEIVE_DOORBELL, |
1863 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | 1863 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
1864 | 1864 | ||
1865 | qp->rq.next_ind = ind; | ||
1865 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | 1866 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; |
1866 | size0 = 0; | 1867 | size0 = 0; |
1867 | } | 1868 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 785bc8505f2a..eec833b81e9b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even | |||
257 | cm_id->context = p; | 257 | cm_id->context = p; |
258 | p->jiffies = jiffies; | 258 | p->jiffies = jiffies; |
259 | spin_lock_irq(&priv->lock); | 259 | spin_lock_irq(&priv->lock); |
260 | if (list_empty(&priv->cm.passive_ids)) | ||
261 | queue_delayed_work(ipoib_workqueue, | ||
262 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | ||
260 | list_add(&p->list, &priv->cm.passive_ids); | 263 | list_add(&p->list, &priv->cm.passive_ids); |
261 | spin_unlock_irq(&priv->lock); | 264 | spin_unlock_irq(&priv->lock); |
262 | queue_delayed_work(ipoib_workqueue, | ||
263 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | ||
264 | return 0; | 265 | return 0; |
265 | 266 | ||
266 | err_rep: | 267 | err_rep: |
@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
378 | if (!list_empty(&p->list)) | 379 | if (!list_empty(&p->list)) |
379 | list_move(&p->list, &priv->cm.passive_ids); | 380 | list_move(&p->list, &priv->cm.passive_ids); |
380 | spin_unlock_irqrestore(&priv->lock, flags); | 381 | spin_unlock_irqrestore(&priv->lock, flags); |
381 | queue_delayed_work(ipoib_workqueue, | ||
382 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | ||
383 | } | 382 | } |
384 | } | 383 | } |
385 | 384 | ||
@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work) | |||
1100 | kfree(p); | 1099 | kfree(p); |
1101 | spin_lock_irq(&priv->lock); | 1100 | spin_lock_irq(&priv->lock); |
1102 | } | 1101 | } |
1102 | |||
1103 | if (!list_empty(&priv->cm.passive_ids)) | ||
1104 | queue_delayed_work(ipoib_workqueue, | ||
1105 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | ||
1103 | spin_unlock_irq(&priv->lock); | 1106 | spin_unlock_irq(&priv->lock); |
1104 | } | 1107 | } |
1105 | 1108 | ||
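
The ipoib_cm.c hunks change when the stale-connection reaper runs: instead of being (re)queued on every REQ and every RX completion, it is armed once when passive_ids goes from empty to non-empty, and it re-arms itself only while entries remain. A sketch of that self-rearming delayed-work pattern; my_priv, my_wq, DELAY and the field names stand in for the ipoib structures and are not real symbols.

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    #define DELAY (5 * HZ)                  /* illustrative interval */

    static struct workqueue_struct *my_wq;  /* created elsewhere */

    struct my_priv {
            spinlock_t lock;
            struct list_head passive_ids;
            struct delayed_work stale_task;
    };

    static void my_stale_task(struct work_struct *work)
    {
            struct my_priv *priv = container_of(work, struct my_priv,
                                                stale_task.work);

            spin_lock_irq(&priv->lock);
            /* ... reap expired entries from priv->passive_ids ... */

            if (!list_empty(&priv->passive_ids))    /* re-arm only if needed */
                    queue_delayed_work(my_wq, &priv->stale_task, DELAY);
            spin_unlock_irq(&priv->lock);
    }

    /* producer: arm the reaper only on the empty -> non-empty transition */
    static void add_entry(struct my_priv *priv, struct list_head *item)
    {
            spin_lock_irq(&priv->lock);
            if (list_empty(&priv->passive_ids))
                    queue_delayed_work(my_wq, &priv->stale_task, DELAY);
            list_add(item, &priv->passive_ids);
            spin_unlock_irq(&priv->lock);
    }

Both the arming check and the re-arm check run under priv->lock, so the producer and the reaper agree on whether the list is empty at the moment each decision is made.
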
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index d24ab234394c..a7562f7fc0b3 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -45,8 +45,6 @@ | |||
45 | */ | 45 | */ |
46 | #define MMC_SHIFT 3 | 46 | #define MMC_SHIFT 3 |
47 | 47 | ||
48 | static int major; | ||
49 | |||
50 | /* | 48 | /* |
51 | * There is one mmc_blk_data per slot. | 49 | * There is one mmc_blk_data per slot. |
52 | */ | 50 | */ |
@@ -466,7 +464,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |||
466 | md->queue.issue_fn = mmc_blk_issue_rq; | 464 | md->queue.issue_fn = mmc_blk_issue_rq; |
467 | md->queue.data = md; | 465 | md->queue.data = md; |
468 | 466 | ||
469 | md->disk->major = major; | 467 | md->disk->major = MMC_BLOCK_MAJOR; |
470 | md->disk->first_minor = devidx << MMC_SHIFT; | 468 | md->disk->first_minor = devidx << MMC_SHIFT; |
471 | md->disk->fops = &mmc_bdops; | 469 | md->disk->fops = &mmc_bdops; |
472 | md->disk->private_data = md; | 470 | md->disk->private_data = md; |
@@ -634,14 +632,9 @@ static int __init mmc_blk_init(void) | |||
634 | { | 632 | { |
635 | int res = -ENOMEM; | 633 | int res = -ENOMEM; |
636 | 634 | ||
637 | res = register_blkdev(major, "mmc"); | 635 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
638 | if (res < 0) { | 636 | if (res) |
639 | printk(KERN_WARNING "Unable to get major %d for MMC media: %d\n", | ||
640 | major, res); | ||
641 | goto out; | 637 | goto out; |
642 | } | ||
643 | if (major == 0) | ||
644 | major = res; | ||
645 | 638 | ||
646 | return mmc_register_driver(&mmc_driver); | 639 | return mmc_register_driver(&mmc_driver); |
647 | 640 | ||
@@ -652,7 +645,7 @@ static int __init mmc_blk_init(void) | |||
652 | static void __exit mmc_blk_exit(void) | 645 | static void __exit mmc_blk_exit(void) |
653 | { | 646 | { |
654 | mmc_unregister_driver(&mmc_driver); | 647 | mmc_unregister_driver(&mmc_driver); |
655 | unregister_blkdev(major, "mmc"); | 648 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
656 | } | 649 | } |
657 | 650 | ||
658 | module_init(mmc_blk_init); | 651 | module_init(mmc_blk_init); |
@@ -661,5 +654,3 @@ module_exit(mmc_blk_exit); | |||
661 | MODULE_LICENSE("GPL"); | 654 | MODULE_LICENSE("GPL"); |
662 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | 655 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); |
663 | 656 | ||
664 | module_param(major, int, 0444); | ||
665 | MODULE_PARM_DESC(major, "specify the major device number for MMC block driver"); | ||
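
The mmc_block change drops the 'major' module parameter and registers the dedicated MMC_BLOCK_MAJOR instead, which simplifies the error handling because register_blkdev() behaves differently for the two cases. Sketched below with an illustrative MY_MAJOR and "mydev" name:

    int res;

    /* fixed major: returns 0 on success, a negative errno on failure */
    res = register_blkdev(MY_MAJOR, "mydev");
    if (res)
            return res;

    /* dynamic major: pass 0; a positive return value is the allocated major */
    res = register_blkdev(0, "mydev");
    if (res < 0)
            return res;
    my_major = res;

With a fixed major, the old "if (res < 0) ... if (major == 0) major = res" dance collapses into the single error check seen in the hunk.
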
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index b7156a4555b5..f967226d7505 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c | |||
@@ -187,9 +187,8 @@ static void au1xmmc_tasklet_finish(unsigned long param) | |||
187 | } | 187 | } |
188 | 188 | ||
189 | static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, | 189 | static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, |
190 | struct mmc_command *cmd) | 190 | struct mmc_command *cmd, unsigned int flags) |
191 | { | 191 | { |
192 | |||
193 | u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); | 192 | u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); |
194 | 193 | ||
195 | switch (mmc_resp_type(cmd)) { | 194 | switch (mmc_resp_type(cmd)) { |
@@ -213,24 +212,16 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, | |||
213 | return MMC_ERR_INVALID; | 212 | return MMC_ERR_INVALID; |
214 | } | 213 | } |
215 | 214 | ||
216 | switch(cmd->opcode) { | 215 | if (flags & MMC_DATA_READ) { |
217 | case MMC_READ_SINGLE_BLOCK: | 216 | if (flags & MMC_DATA_MULTI) |
218 | case SD_APP_SEND_SCR: | 217 | mmccmd |= SD_CMD_CT_4; |
219 | mmccmd |= SD_CMD_CT_2; | 218 | else |
220 | break; | 219 | mmccmd |= SD_CMD_CT_2; |
221 | case MMC_READ_MULTIPLE_BLOCK: | 220 | } else if (flags & MMC_DATA_WRITE) { |
222 | mmccmd |= SD_CMD_CT_4; | 221 | if (flags & MMC_DATA_MULTI) |
223 | break; | 222 | mmccmd |= SD_CMD_CT_3; |
224 | case MMC_WRITE_BLOCK: | 223 | else |
225 | mmccmd |= SD_CMD_CT_1; | 224 | mmccmd |= SD_CMD_CT_1; |
226 | break; | ||
227 | |||
228 | case MMC_WRITE_MULTIPLE_BLOCK: | ||
229 | mmccmd |= SD_CMD_CT_3; | ||
230 | break; | ||
231 | case MMC_STOP_TRANSMISSION: | ||
232 | mmccmd |= SD_CMD_CT_7; | ||
233 | break; | ||
234 | } | 225 | } |
235 | 226 | ||
236 | au_writel(cmd->arg, HOST_CMDARG(host)); | 227 | au_writel(cmd->arg, HOST_CMDARG(host)); |
@@ -665,6 +656,7 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) | |||
665 | { | 656 | { |
666 | 657 | ||
667 | struct au1xmmc_host *host = mmc_priv(mmc); | 658 | struct au1xmmc_host *host = mmc_priv(mmc); |
659 | unsigned int flags = 0; | ||
668 | int ret = MMC_ERR_NONE; | 660 | int ret = MMC_ERR_NONE; |
669 | 661 | ||
670 | WARN_ON(irqs_disabled()); | 662 | WARN_ON(irqs_disabled()); |
@@ -677,11 +669,12 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) | |||
677 | 669 | ||
678 | if (mrq->data) { | 670 | if (mrq->data) { |
679 | FLUSH_FIFO(host); | 671 | FLUSH_FIFO(host); |
672 | flags = mrq->data->flags; | ||
680 | ret = au1xmmc_prepare_data(host, mrq->data); | 673 | ret = au1xmmc_prepare_data(host, mrq->data); |
681 | } | 674 | } |
682 | 675 | ||
683 | if (ret == MMC_ERR_NONE) | 676 | if (ret == MMC_ERR_NONE) |
684 | ret = au1xmmc_send_command(host, 0, mrq->cmd); | 677 | ret = au1xmmc_send_command(host, 0, mrq->cmd, flags); |
685 | 678 | ||
686 | if (ret != MMC_ERR_NONE) { | 679 | if (ret != MMC_ERR_NONE) { |
687 | mrq->cmd->error = ret; | 680 | mrq->cmd->error = ret; |
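
The au1xmmc change stops enumerating opcodes and derives the controller's transfer-type field from the data flags the MMC core already provides, so any single- or multi-block read or write picks the right setting. Condensed, using the SD_CMD_CT_* values shown in the hunk (flags is mrq->data->flags):

    if (flags & MMC_DATA_READ)
            mmccmd |= (flags & MMC_DATA_MULTI) ? SD_CMD_CT_4 : SD_CMD_CT_2;
    else if (flags & MMC_DATA_WRITE)
            mmccmd |= (flags & MMC_DATA_MULTI) ? SD_CMD_CT_3 : SD_CMD_CT_1;
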
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index d97d3864b57f..f8985c508bb9 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -232,20 +232,14 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat) | |||
232 | /* | 232 | /* |
233 | * workaround for erratum #42: | 233 | * workaround for erratum #42: |
234 | * Intel PXA27x Family Processor Specification Update Rev 001 | 234 | * Intel PXA27x Family Processor Specification Update Rev 001 |
235 | * A bogus CRC error can appear if the msb of a 136 bit | ||
236 | * response is a one. | ||
235 | */ | 237 | */ |
236 | if (cmd->opcode == MMC_ALL_SEND_CID || | 238 | if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) { |
237 | cmd->opcode == MMC_SEND_CSD || | 239 | pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode); |
238 | cmd->opcode == MMC_SEND_CID) { | 240 | } else |
239 | /* a bogus CRC error can appear if the msb of | ||
240 | the 15 byte response is a one */ | ||
241 | if ((cmd->resp[0] & 0x80000000) == 0) | ||
242 | cmd->error = MMC_ERR_BADCRC; | ||
243 | } else { | ||
244 | pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode); | ||
245 | } | ||
246 | #else | ||
247 | cmd->error = MMC_ERR_BADCRC; | ||
248 | #endif | 241 | #endif |
242 | cmd->error = MMC_ERR_BADCRC; | ||
249 | } | 243 | } |
250 | 244 | ||
251 | pxamci_disable_irq(host, END_CMD_RES); | 245 | pxamci_disable_irq(host, END_CMD_RES); |
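
The pxamci workaround for PXA27x erratum #42 now keys off the response format instead of a list of opcodes: a reported CRC error is ignored only when the response is the long 136-bit format and its most significant bit is set, the condition under which the controller raises a bogus CRC error. The resulting logic, condensed (cmd is the struct mmc_command being completed):

    if ((cmd->flags & MMC_RSP_136) && (cmd->resp[0] & 0x80000000))
            pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
    else
            cmd->error = MMC_ERR_BADCRC;

When the workaround is not compiled in, the dangling else ahead of the #endif disappears with it and the CRC error is set unconditionally, matching the old #else branch.
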
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ff5bf73cdd25..a359efdd77eb 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -963,6 +963,15 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
963 | if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) | 963 | if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) |
964 | sdhci_transfer_pio(host); | 964 | sdhci_transfer_pio(host); |
965 | 965 | ||
966 | /* | ||
967 | * We currently don't do anything fancy with DMA | ||
968 | * boundaries, but as we can't disable the feature | ||
969 | * we need to at least restart the transfer. | ||
970 | */ | ||
971 | if (intmask & SDHCI_INT_DMA_END) | ||
972 | writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS), | ||
973 | host->ioaddr + SDHCI_DMA_ADDRESS); | ||
974 | |||
966 | if (intmask & SDHCI_INT_DATA_END) | 975 | if (intmask & SDHCI_INT_DATA_END) |
967 | sdhci_finish_data(host); | 976 | sdhci_finish_data(host); |
968 | } | 977 | } |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index fb99cd445504..c5baa197bc08 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2508,6 +2508,7 @@ config MLX4_CORE | |||
2508 | 2508 | ||
2509 | config MLX4_DEBUG | 2509 | config MLX4_DEBUG |
2510 | bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) | 2510 | bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) |
2511 | depends on MLX4_CORE | ||
2511 | default y | 2512 | default y |
2512 | ---help--- | 2513 | ---help--- |
2513 | This option causes debugging code to be compiled into the | 2514 | This option causes debugging code to be compiled into the |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 4debb024eaf9..20b8c0d3ced4 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -542,8 +542,6 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) | |||
542 | struct mlx4_priv *priv = mlx4_priv(dev); | 542 | struct mlx4_priv *priv = mlx4_priv(dev); |
543 | int err; | 543 | int err; |
544 | 544 | ||
545 | MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock); | ||
546 | |||
547 | err = mlx4_init_uar_table(dev); | 545 | err = mlx4_init_uar_table(dev); |
548 | if (err) { | 546 | if (err) { |
549 | mlx4_err(dev, "Failed to initialize " | 547 | mlx4_err(dev, "Failed to initialize " |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 9befbae3d196..3d3b6d24d8d3 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -275,7 +275,6 @@ struct mlx4_priv { | |||
275 | 275 | ||
276 | struct mlx4_uar driver_uar; | 276 | struct mlx4_uar driver_uar; |
277 | void __iomem *kar; | 277 | void __iomem *kar; |
278 | MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock) | ||
279 | 278 | ||
280 | u32 rev_id; | 279 | u32 rev_id; |
281 | char board_id[MLX4_BOARD_ID_LEN]; | 280 | char board_id[MLX4_BOARD_ID_LEN]; |
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index bc7f3dee6e5b..8d38425e46c3 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -85,6 +85,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) | |||
85 | { | 85 | { |
86 | struct pci_dev *pdev = mac->pdev; | 86 | struct pci_dev *pdev = mac->pdev; |
87 | struct device_node *dn = pci_device_to_OF_node(pdev); | 87 | struct device_node *dn = pci_device_to_OF_node(pdev); |
88 | int len; | ||
88 | const u8 *maddr; | 89 | const u8 *maddr; |
89 | u8 addr[6]; | 90 | u8 addr[6]; |
90 | 91 | ||
@@ -94,9 +95,17 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) | |||
94 | return -ENOENT; | 95 | return -ENOENT; |
95 | } | 96 | } |
96 | 97 | ||
97 | maddr = of_get_property(dn, "local-mac-address", NULL); | 98 | maddr = of_get_property(dn, "local-mac-address", &len); |
99 | |||
100 | if (maddr && len == 6) { | ||
101 | memcpy(mac->mac_addr, maddr, 6); | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | /* Some old versions of firmware mistakenly uses mac-address | ||
106 | * (and as a string) instead of a byte array in local-mac-address. | ||
107 | */ | ||
98 | 108 | ||
99 | /* Fall back to mac-address for older firmware */ | ||
100 | if (maddr == NULL) | 109 | if (maddr == NULL) |
101 | maddr = of_get_property(dn, "mac-address", NULL); | 110 | maddr = of_get_property(dn, "mac-address", NULL); |
102 | 111 | ||
@@ -106,6 +115,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) | |||
106 | return -ENOENT; | 115 | return -ENOENT; |
107 | } | 116 | } |
108 | 117 | ||
118 | |||
109 | if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], | 119 | if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], |
110 | &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { | 120 | &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { |
111 | dev_warn(&pdev->dev, | 121 | dev_warn(&pdev->dev, |
@@ -113,7 +123,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) | |||
113 | return -EINVAL; | 123 | return -EINVAL; |
114 | } | 124 | } |
115 | 125 | ||
116 | memcpy(mac->mac_addr, addr, sizeof(addr)); | 126 | memcpy(mac->mac_addr, addr, 6); |
127 | |||
117 | return 0; | 128 | return 0; |
118 | } | 129 | } |
119 | 130 | ||
@@ -384,17 +395,14 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev) | |||
384 | 395 | ||
385 | static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) | 396 | static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) |
386 | { | 397 | { |
387 | unsigned int reg, stat; | 398 | unsigned int reg, pcnt; |
388 | /* Re-enable packet count interrupts: finally | 399 | /* Re-enable packet count interrupts: finally |
389 | * ack the packet count interrupt we got in rx_intr. | 400 | * ack the packet count interrupt we got in rx_intr. |
390 | */ | 401 | */ |
391 | 402 | ||
392 | pci_read_config_dword(mac->iob_pdev, | 403 | pcnt = *mac->rx_status & PAS_STATUS_PCNT_M; |
393 | PAS_IOB_DMA_RXCH_STAT(mac->dma_rxch), | ||
394 | &stat); | ||
395 | 404 | ||
396 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(stat & PAS_IOB_DMA_RXCH_STAT_CNTDEL_M) | 405 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; |
397 | | PAS_IOB_DMA_RXCH_RESET_PINTC; | ||
398 | 406 | ||
399 | pci_write_config_dword(mac->iob_pdev, | 407 | pci_write_config_dword(mac->iob_pdev, |
400 | PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), | 408 | PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), |
@@ -403,14 +411,12 @@ static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) | |||
403 | 411 | ||
404 | static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) | 412 | static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) |
405 | { | 413 | { |
406 | unsigned int reg, stat; | 414 | unsigned int reg, pcnt; |
407 | 415 | ||
408 | /* Re-enable packet count interrupts */ | 416 | /* Re-enable packet count interrupts */ |
409 | pci_read_config_dword(mac->iob_pdev, | 417 | pcnt = *mac->tx_status & PAS_STATUS_PCNT_M; |
410 | PAS_IOB_DMA_TXCH_STAT(mac->dma_txch), &stat); | ||
411 | 418 | ||
412 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(stat & PAS_IOB_DMA_TXCH_STAT_CNTDEL_M) | 419 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; |
413 | | PAS_IOB_DMA_TXCH_RESET_PINTC; | ||
414 | 420 | ||
415 | pci_write_config_dword(mac->iob_pdev, | 421 | pci_write_config_dword(mac->iob_pdev, |
416 | PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); | 422 | PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); |
@@ -591,21 +597,24 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) | |||
591 | { | 597 | { |
592 | struct net_device *dev = data; | 598 | struct net_device *dev = data; |
593 | struct pasemi_mac *mac = netdev_priv(dev); | 599 | struct pasemi_mac *mac = netdev_priv(dev); |
594 | unsigned int reg; | 600 | unsigned int reg, pcnt; |
595 | 601 | ||
596 | if (!(*mac->tx_status & PAS_STATUS_CAUSE_M)) | 602 | if (!(*mac->tx_status & PAS_STATUS_CAUSE_M)) |
597 | return IRQ_NONE; | 603 | return IRQ_NONE; |
598 | 604 | ||
599 | pasemi_mac_clean_tx(mac); | 605 | pasemi_mac_clean_tx(mac); |
600 | 606 | ||
601 | reg = PAS_IOB_DMA_TXCH_RESET_PINTC; | 607 | pcnt = *mac->tx_status & PAS_STATUS_PCNT_M; |
608 | |||
609 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; | ||
602 | 610 | ||
603 | if (*mac->tx_status & PAS_STATUS_SOFT) | 611 | if (*mac->tx_status & PAS_STATUS_SOFT) |
604 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; | 612 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; |
605 | if (*mac->tx_status & PAS_STATUS_ERROR) | 613 | if (*mac->tx_status & PAS_STATUS_ERROR) |
606 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; | 614 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; |
607 | 615 | ||
608 | pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), | 616 | pci_write_config_dword(mac->iob_pdev, |
617 | PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), | ||
609 | reg); | 618 | reg); |
610 | 619 | ||
611 | return IRQ_HANDLED; | 620 | return IRQ_HANDLED; |
@@ -974,6 +983,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
974 | if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) { | 983 | if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) { |
975 | spin_unlock_irqrestore(&txring->lock, flags); | 984 | spin_unlock_irqrestore(&txring->lock, flags); |
976 | pasemi_mac_clean_tx(mac); | 985 | pasemi_mac_clean_tx(mac); |
986 | pasemi_mac_restart_tx_intr(mac); | ||
977 | spin_lock_irqsave(&txring->lock, flags); | 987 | spin_lock_irqsave(&txring->lock, flags); |
978 | 988 | ||
979 | if (txring->next_to_clean - txring->next_to_use == | 989 | if (txring->next_to_clean - txring->next_to_use == |
@@ -1210,6 +1220,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev) | |||
1210 | static struct pci_device_id pasemi_mac_pci_tbl[] = { | 1220 | static struct pci_device_id pasemi_mac_pci_tbl[] = { |
1211 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, | 1221 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, |
1212 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, | 1222 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, |
1223 | { }, | ||
1213 | }; | 1224 | }; |
1214 | 1225 | ||
1215 | MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); | 1226 | MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); |
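
pasemi_get_mac_addr() now prefers the standard 6-byte local-mac-address property, validating its length, and falls back to the legacy string-valued mac-address property only for old firmware. The flow, condensed (dn is the device's device_node, mac_addr the destination buffer):

    const u8 *maddr;
    u8 addr[6];
    int len;

    maddr = of_get_property(dn, "local-mac-address", &len);
    if (maddr && len == 6) {
            memcpy(mac_addr, maddr, 6);     /* proper binary property */
            return 0;
    }

    /* old firmware: "mac-address" holding a colon-separated string */
    maddr = of_get_property(dn, "mac-address", NULL);
    if (!maddr)
            return -ENOENT;
    if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
               &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6)
            return -EINVAL;
    memcpy(mac_addr, addr, 6);
    return 0;
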
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index 8bc0cea8b145..c29ee159c33d 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h | |||
@@ -341,7 +341,7 @@ enum { | |||
341 | PAS_IOB_DMA_TXCH_STAT_CNTDEL_M) | 341 | PAS_IOB_DMA_TXCH_STAT_CNTDEL_M) |
342 | #define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4) | 342 | #define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4) |
343 | #define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000 | 343 | #define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000 |
344 | #define PAS_IOB_DMA_RXCH_RESET_PCNT_S 0 | 344 | #define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16 |
345 | #define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \ | 345 | #define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \ |
346 | PAS_IOB_DMA_RXCH_RESET_PCNT_M) | 346 | PAS_IOB_DMA_RXCH_RESET_PCNT_M) |
347 | #define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020 | 347 | #define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020 |
@@ -352,7 +352,7 @@ enum { | |||
352 | #define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001 | 352 | #define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001 |
353 | #define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4) | 353 | #define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4) |
354 | #define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000 | 354 | #define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000 |
355 | #define PAS_IOB_DMA_TXCH_RESET_PCNT_S 0 | 355 | #define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16 |
356 | #define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \ | 356 | #define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \ |
357 | PAS_IOB_DMA_TXCH_RESET_PCNT_M) | 357 | PAS_IOB_DMA_TXCH_RESET_PCNT_M) |
358 | #define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020 | 358 | #define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020 |
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 81f24847c963..db43e42bee35 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -77,7 +77,6 @@ static const char version[] = | |||
77 | #include <linux/skbuff.h> | 77 | #include <linux/skbuff.h> |
78 | 78 | ||
79 | #include <asm/io.h> | 79 | #include <asm/io.h> |
80 | #include <asm/irq.h> | ||
81 | 80 | ||
82 | #include "smc911x.h" | 81 | #include "smc911x.h" |
83 | 82 | ||
@@ -2084,12 +2083,11 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) | |||
2084 | lp->ctl_rspeed = 100; | 2083 | lp->ctl_rspeed = 100; |
2085 | 2084 | ||
2086 | /* Grab the IRQ */ | 2085 | /* Grab the IRQ */ |
2087 | retval = request_irq(dev->irq, &smc911x_interrupt, IRQF_SHARED, dev->name, dev); | 2086 | retval = request_irq(dev->irq, &smc911x_interrupt, |
2087 | IRQF_SHARED | IRQF_TRIGGER_FALLING, dev->name, dev); | ||
2088 | if (retval) | 2088 | if (retval) |
2089 | goto err_out; | 2089 | goto err_out; |
2090 | 2090 | ||
2091 | set_irq_type(dev->irq, IRQT_FALLING); | ||
2092 | |||
2093 | #ifdef SMC_USE_DMA | 2091 | #ifdef SMC_USE_DMA |
2094 | lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); | 2092 | lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); |
2095 | lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); | 2093 | lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); |
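
The smc911x change folds the trigger type into the request_irq() flags instead of calling set_irq_type() after the fact, so the falling-edge trigger is programmed as part of registering the handler:

    retval = request_irq(dev->irq, &smc911x_interrupt,
                         IRQF_SHARED | IRQF_TRIGGER_FALLING, dev->name, dev);
    if (retval)
            goto err_out;

The generic IRQ layer programs the trigger from the IRQF_TRIGGER_* bits, which is what the separate set_irq_type() call used to do by hand.
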
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 0f667652fda9..c2ccbd098f53 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. | 2 | * Copyright (C) 2006-2007 Freescale Semicondutor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: Shlomi Gridish <gridish@freescale.com> | 4 | * Author: Shlomi Gridish <gridish@freescale.com> |
5 | * Li Yang <leoli@freescale.com> | 5 | * Li Yang <leoli@freescale.com> |
@@ -3737,21 +3737,21 @@ static int ucc_geth_close(struct net_device *dev) | |||
3737 | 3737 | ||
3738 | const struct ethtool_ops ucc_geth_ethtool_ops = { }; | 3738 | const struct ethtool_ops ucc_geth_ethtool_ops = { }; |
3739 | 3739 | ||
3740 | static phy_interface_t to_phy_interface(const char *interface_type) | 3740 | static phy_interface_t to_phy_interface(const char *phy_connection_type) |
3741 | { | 3741 | { |
3742 | if (strcasecmp(interface_type, "mii") == 0) | 3742 | if (strcasecmp(phy_connection_type, "mii") == 0) |
3743 | return PHY_INTERFACE_MODE_MII; | 3743 | return PHY_INTERFACE_MODE_MII; |
3744 | if (strcasecmp(interface_type, "gmii") == 0) | 3744 | if (strcasecmp(phy_connection_type, "gmii") == 0) |
3745 | return PHY_INTERFACE_MODE_GMII; | 3745 | return PHY_INTERFACE_MODE_GMII; |
3746 | if (strcasecmp(interface_type, "tbi") == 0) | 3746 | if (strcasecmp(phy_connection_type, "tbi") == 0) |
3747 | return PHY_INTERFACE_MODE_TBI; | 3747 | return PHY_INTERFACE_MODE_TBI; |
3748 | if (strcasecmp(interface_type, "rmii") == 0) | 3748 | if (strcasecmp(phy_connection_type, "rmii") == 0) |
3749 | return PHY_INTERFACE_MODE_RMII; | 3749 | return PHY_INTERFACE_MODE_RMII; |
3750 | if (strcasecmp(interface_type, "rgmii") == 0) | 3750 | if (strcasecmp(phy_connection_type, "rgmii") == 0) |
3751 | return PHY_INTERFACE_MODE_RGMII; | 3751 | return PHY_INTERFACE_MODE_RGMII; |
3752 | if (strcasecmp(interface_type, "rgmii-id") == 0) | 3752 | if (strcasecmp(phy_connection_type, "rgmii-id") == 0) |
3753 | return PHY_INTERFACE_MODE_RGMII_ID; | 3753 | return PHY_INTERFACE_MODE_RGMII_ID; |
3754 | if (strcasecmp(interface_type, "rtbi") == 0) | 3754 | if (strcasecmp(phy_connection_type, "rtbi") == 0) |
3755 | return PHY_INTERFACE_MODE_RTBI; | 3755 | return PHY_INTERFACE_MODE_RTBI; |
3756 | 3756 | ||
3757 | return PHY_INTERFACE_MODE_MII; | 3757 | return PHY_INTERFACE_MODE_MII; |
@@ -3819,29 +3819,21 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3819 | ug_info->phy_address = *prop; | 3819 | ug_info->phy_address = *prop; |
3820 | 3820 | ||
3821 | /* get the phy interface type, or default to MII */ | 3821 | /* get the phy interface type, or default to MII */ |
3822 | prop = of_get_property(np, "interface-type", NULL); | 3822 | prop = of_get_property(np, "phy-connection-type", NULL); |
3823 | if (!prop) { | 3823 | if (!prop) { |
3824 | /* handle interface property present in old trees */ | 3824 | /* handle interface property present in old trees */ |
3825 | prop = of_get_property(phy, "interface", NULL); | 3825 | prop = of_get_property(phy, "interface", NULL); |
3826 | if (prop != NULL) | 3826 | if (prop != NULL) { |
3827 | phy_interface = enet_to_phy_interface[*prop]; | 3827 | phy_interface = enet_to_phy_interface[*prop]; |
3828 | else | 3828 | max_speed = enet_to_speed[*prop]; |
3829 | } else | ||
3829 | phy_interface = PHY_INTERFACE_MODE_MII; | 3830 | phy_interface = PHY_INTERFACE_MODE_MII; |
3830 | } else { | 3831 | } else { |
3831 | phy_interface = to_phy_interface((const char *)prop); | 3832 | phy_interface = to_phy_interface((const char *)prop); |
3832 | } | 3833 | } |
3833 | 3834 | ||
3834 | /* get speed, or derive from interface */ | 3835 | /* get speed, or derive from PHY interface */ |
3835 | prop = of_get_property(np, "max-speed", NULL); | 3836 | if (max_speed == 0) |
3836 | if (!prop) { | ||
3837 | /* handle interface property present in old trees */ | ||
3838 | prop = of_get_property(phy, "interface", NULL); | ||
3839 | if (prop != NULL) | ||
3840 | max_speed = enet_to_speed[*prop]; | ||
3841 | } else { | ||
3842 | max_speed = *prop; | ||
3843 | } | ||
3844 | if (!max_speed) { | ||
3845 | switch (phy_interface) { | 3837 | switch (phy_interface) { |
3846 | case PHY_INTERFACE_MODE_GMII: | 3838 | case PHY_INTERFACE_MODE_GMII: |
3847 | case PHY_INTERFACE_MODE_RGMII: | 3839 | case PHY_INTERFACE_MODE_RGMII: |
@@ -3854,9 +3846,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3854 | max_speed = SPEED_100; | 3846 | max_speed = SPEED_100; |
3855 | break; | 3847 | break; |
3856 | } | 3848 | } |
3857 | } | ||
3858 | 3849 | ||
3859 | if (max_speed == SPEED_1000) { | 3850 | if (max_speed == SPEED_1000) { |
3851 | /* configure muram FIFOs for gigabit operation */ | ||
3860 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; | 3852 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; |
3861 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; | 3853 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; |
3862 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; | 3854 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; |
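
ucc_geth_probe() now reads the standard phy-connection-type string from the MAC node and treats the numeric interface property on the PHY node purely as a legacy fallback, which also supplies the maximum speed. The lookup order, condensed (np is the MAC node, phy the PHY node; enet_to_phy_interface[] and enet_to_speed[] are the driver tables referenced in the hunk):

    prop = of_get_property(np, "phy-connection-type", NULL);
    if (prop) {
            phy_interface = to_phy_interface((const char *)prop);
    } else {
            prop = of_get_property(phy, "interface", NULL); /* old device trees */
            if (prop) {
                    phy_interface = enet_to_phy_interface[*prop];
                    max_speed = enet_to_speed[*prop];       /* legacy prop implies speed */
            } else {
                    phy_interface = PHY_INTERFACE_MODE_MII;
            }
    }

With max_speed possibly filled in by the legacy path, the switch on phy_interface that follows only runs when the speed is still undetermined.
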
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index 27a1ef3b7b06..f96966d4bcc2 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c | |||
@@ -1,12 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/net/ucc_geth_mii.c | 2 | * drivers/net/ucc_geth_mii.c |
3 | * | 3 | * |
4 | * Gianfar Ethernet Driver -- MIIM bus implementation | 4 | * QE UCC Gigabit Ethernet Driver -- MII Management Bus Implementation |
5 | * Provides Bus interface for MIIM regs | 5 | * Provides Bus interface for MII Management regs in the UCC register space |
6 | * | 6 | * |
7 | * Author: Li Yang | 7 | * Copyright (C) 2007 Freescale Semiconductor, Inc. |
8 | * | 8 | * |
9 | * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. | 9 | * Authors: Li Yang <leoli@freescale.com> |
10 | * Kim Phillips <kim.phillips@freescale.com> | ||
10 | * | 11 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the | 13 | * under the terms of the GNU General Public License as published by the |
diff --git a/drivers/net/ucc_geth_mii.h b/drivers/net/ucc_geth_mii.h index 98430fe0bfc6..d83437039919 100644 --- a/drivers/net/ucc_geth_mii.h +++ b/drivers/net/ucc_geth_mii.h | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/net/ucc_geth_mii.h | 2 | * drivers/net/ucc_geth_mii.h |
3 | * | 3 | * |
4 | * Gianfar Ethernet Driver -- MII Management Bus Implementation | 4 | * QE UCC Gigabit Ethernet Driver -- MII Management Bus Implementation |
5 | * Driver for the MDIO bus controller in the Gianfar register space | 5 | * Provides Bus interface for MII Management regs in the UCC register space |
6 | * | 6 | * |
7 | * Author: Andy Fleming | 7 | * Copyright (C) 2007 Freescale Semiconductor, Inc. |
8 | * Maintainer: Kumar Gala | ||
9 | * | 8 | * |
10 | * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. | 9 | * Authors: Li Yang <leoli@freescale.com> |
10 | * Kim Phillips <kim.phillips@freescale.com> | ||
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms of the GNU General Public License as published by the | 13 | * under the terms of the GNU General Public License as published by the |
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 8410587348f1..178155bf9db6 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/ebus.h> | 18 | #include <asm/ebus.h> |
19 | #include <asm/spitfire.h> | 19 | #include <asm/spitfire.h> |
20 | #include <asm/bbc.h> | 20 | #include <asm/bbc.h> |
21 | #include <asm/io.h> | ||
21 | 22 | ||
22 | #include "bbc_i2c.h" | 23 | #include "bbc_i2c.h" |
23 | 24 | ||
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 2d14a29effe4..3279a1b6501d 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/ebus.h> /* EBus device */ | 20 | #include <asm/ebus.h> /* EBus device */ |
21 | #include <asm/oplib.h> /* OpenProm Library */ | 21 | #include <asm/oplib.h> /* OpenProm Library */ |
22 | #include <asm/uaccess.h> /* put_/get_user */ | 22 | #include <asm/uaccess.h> /* put_/get_user */ |
23 | #include <asm/io.h> | ||
23 | 24 | ||
24 | #include <asm/display7seg.h> | 25 | #include <asm/display7seg.h> |
25 | 26 | ||
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index e62d23f65180..d28c14e23c32 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1757,6 +1757,14 @@ config SCSI_ESP_CORE | |||
1757 | tristate "ESP Scsi Driver Core" | 1757 | tristate "ESP Scsi Driver Core" |
1758 | depends on SCSI | 1758 | depends on SCSI |
1759 | select SCSI_SPI_ATTRS | 1759 | select SCSI_SPI_ATTRS |
1760 | help | ||
1761 | This is a core driver for NCR53c9x based scsi chipsets, | ||
1762 | also known as "ESP" for Emulex Scsi Processor or | ||
1763 | Enhanced Scsi Processor. This driver does not exist by | ||
1764 | itself, there are front-end drivers which, when enabled, | ||
1765 | select and enable this driver. One example is SCSI_SUNESP. | ||
1766 | These front-end drivers provide probing, DMA, and register | ||
1767 | access support for the core driver. | ||
1760 | 1768 | ||
1761 | config SCSI_SUNESP | 1769 | config SCSI_SUNESP |
1762 | tristate "Sparc ESP Scsi Driver" | 1770 | tristate "Sparc ESP Scsi Driver" |
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c index 40d48566215c..c3a6bd2e7950 100644 --- a/drivers/serial/sunhv.c +++ b/drivers/serial/sunhv.c | |||
@@ -493,6 +493,10 @@ static struct of_device_id hv_match[] = { | |||
493 | .name = "console", | 493 | .name = "console", |
494 | .compatible = "qcn", | 494 | .compatible = "qcn", |
495 | }, | 495 | }, |
496 | { | ||
497 | .name = "console", | ||
498 | .compatible = "SUNW,sun4v-console", | ||
499 | }, | ||
496 | {}, | 500 | {}, |
497 | }; | 501 | }; |
498 | MODULE_DEVICE_TABLE(of, hv_match); | 502 | MODULE_DEVICE_TABLE(of, hv_match); |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index eebcb708cff1..4d7485fa553f 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -1535,7 +1535,7 @@ config FB_LEO | |||
1535 | 1535 | ||
1536 | config FB_XVR500 | 1536 | config FB_XVR500 |
1537 | bool "Sun XVR-500 3DLABS Wildcat support" | 1537 | bool "Sun XVR-500 3DLABS Wildcat support" |
1538 | depends on FB && PCI && SPARC64 | 1538 | depends on (FB = y) && PCI && SPARC64 |
1539 | select FB_CFB_FILLRECT | 1539 | select FB_CFB_FILLRECT |
1540 | select FB_CFB_COPYAREA | 1540 | select FB_CFB_COPYAREA |
1541 | select FB_CFB_IMAGEBLIT | 1541 | select FB_CFB_IMAGEBLIT |
@@ -1548,7 +1548,7 @@ config FB_XVR500 | |||
1548 | 1548 | ||
1549 | config FB_XVR2500 | 1549 | config FB_XVR2500 |
1550 | bool "Sun XVR-2500 3DLABS Wildcat support" | 1550 | bool "Sun XVR-2500 3DLABS Wildcat support" |
1551 | depends on FB && PCI && SPARC64 | 1551 | depends on (FB = y) && PCI && SPARC64 |
1552 | select FB_CFB_FILLRECT | 1552 | select FB_CFB_FILLRECT |
1553 | select FB_CFB_COPYAREA | 1553 | select FB_CFB_COPYAREA |
1554 | select FB_CFB_IMAGEBLIT | 1554 | select FB_CFB_IMAGEBLIT |