aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/acpi_memhotplug.c8
-rw-r--r--drivers/acpi/battery.c3
-rw-r--r--drivers/acpi/bus.c11
-rw-r--r--drivers/acpi/hotkey.c281
-rw-r--r--drivers/acpi/i2c_ec.c2
-rw-r--r--drivers/acpi/osl.c10
-rw-r--r--drivers/acpi/sbs.c3
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/cdrom/gscd.c2
-rw-r--r--drivers/char/moxa.c8
-rw-r--r--drivers/char/tty_io.c808
-rw-r--r--drivers/char/tty_ioctl.c59
-rw-r--r--drivers/char/vt_ioctl.c2
-rw-r--r--drivers/hwmon/abituguru.c99
-rw-r--r--drivers/i2c/chips/tps65010.c12
-rw-r--r--drivers/ieee1394/ohci1394.c4
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/sa_query.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c11
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c54
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c22
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/misc/wistron_btns.c16
-rw-r--r--drivers/input/mouse/psmouse-base.c7
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/md.c13
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/message/fusion/mptbase.h1
-rw-r--r--drivers/message/fusion/mptfc.c92
-rw-r--r--drivers/mtd/nand/ams-delta.c10
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/pci/hotplug/Kconfig2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c54
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/quirks.c59
-rw-r--r--drivers/rtc/rtc-s3c.c124
-rw-r--r--drivers/s390/block/dasd_devmap.c8
-rw-r--r--drivers/s390/block/dasd_eckd.c14
-rw-r--r--drivers/s390/scsi/zfcp_aux.c120
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c5
-rw-r--r--drivers/s390/scsi/zfcp_def.h15
-rw-r--r--drivers/s390/scsi/zfcp_erp.c212
-rw-r--r--drivers/s390/scsi/zfcp_ext.h9
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c122
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c79
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c73
-rw-r--r--drivers/scsi/ata_piix.c84
-rw-r--r--drivers/scsi/esp.c3
-rw-r--r--drivers/scsi/hptiop.c568
-rw-r--r--drivers/scsi/ide-scsi.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c209
-rw-r--r--drivers/scsi/iscsi_tcp.h2
-rw-r--r--drivers/scsi/libata-core.c2
-rw-r--r--drivers/scsi/libiscsi.c214
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c101
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/mega_common.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_ioctl.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c42
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.h4
-rw-r--r--drivers/scsi/pdc_adma.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/sata_via.c117
-rw-r--r--drivers/scsi/scsi_error.c18
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c15
-rw-r--r--drivers/scsi/sg.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/serial/sunsab.c9
-rw-r--r--drivers/serial/sunzilog.c3
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c2
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/video/imacfb.c4
-rw-r--r--drivers/video/matrox/g450_pll.c8
97 files changed, 2445 insertions, 1728 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 96309b9660da..11abc7bf777e 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -285,6 +285,8 @@ static int __init acpi_ac_init(void)
285{ 285{
286 int result; 286 int result;
287 287
288 if (acpi_disabled)
289 return -ENODEV;
288 290
289 acpi_ac_dir = acpi_lock_ac_dir(); 291 acpi_ac_dir = acpi_lock_ac_dir();
290 if (!acpi_ac_dir) 292 if (!acpi_ac_dir)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b0d4b147b19e..1dda370f402b 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -484,10 +484,8 @@ acpi_memory_register_notify_handler(acpi_handle handle,
484 484
485 485
486 status = is_memory_device(handle); 486 status = is_memory_device(handle);
487 if (ACPI_FAILURE(status)){ 487 if (ACPI_FAILURE(status))
488 ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
489 return AE_OK; /* continue */ 488 return AE_OK; /* continue */
490 }
491 489
492 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 490 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
493 acpi_memory_device_notify, NULL); 491 acpi_memory_device_notify, NULL);
@@ -503,10 +501,8 @@ acpi_memory_deregister_notify_handler(acpi_handle handle,
503 501
504 502
505 status = is_memory_device(handle); 503 status = is_memory_device(handle);
506 if (ACPI_FAILURE(status)){ 504 if (ACPI_FAILURE(status))
507 ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
508 return AE_OK; /* continue */ 505 return AE_OK; /* continue */
509 }
510 506
511 status = acpi_remove_notify_handler(handle, 507 status = acpi_remove_notify_handler(handle,
512 ACPI_SYSTEM_NOTIFY, 508 ACPI_SYSTEM_NOTIFY,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6e5221707d97..9810e2a55d0a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -757,6 +757,9 @@ static int __init acpi_battery_init(void)
757{ 757{
758 int result; 758 int result;
759 759
760 if (acpi_disabled)
761 return -ENODEV;
762
760 acpi_battery_dir = acpi_lock_battery_dir(); 763 acpi_battery_dir = acpi_lock_battery_dir();
761 if (!acpi_battery_dir) 764 if (!acpi_battery_dir)
762 return -ENODEV; 765 return -ENODEV;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b2977695e120..279c4bac92e5 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <linux/kernel.h>
28#include <linux/list.h> 29#include <linux/list.h>
29#include <linux/sched.h> 30#include <linux/sched.h>
30#include <linux/pm.h> 31#include <linux/pm.h>
@@ -68,7 +69,8 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
68 69
69 status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device); 70 status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
70 if (ACPI_FAILURE(status) || !*device) { 71 if (ACPI_FAILURE(status) || !*device) {
71 ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle)); 72 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
73 handle));
72 return -ENODEV; 74 return -ENODEV;
73 } 75 }
74 76
@@ -192,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
192 /* Make sure this is a valid target state */ 194 /* Make sure this is a valid target state */
193 195
194 if (!device->flags.power_manageable) { 196 if (!device->flags.power_manageable) {
195 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable", 197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
196 device->kobj.name)); 198 device->kobj.name));
197 return -ENODEV; 199 return -ENODEV;
198 } 200 }
@@ -738,7 +740,10 @@ static int __init acpi_init(void)
738 return -ENODEV; 740 return -ENODEV;
739 } 741 }
740 742
741 firmware_register(&acpi_subsys); 743 result = firmware_register(&acpi_subsys);
744 if (result < 0)
745 printk(KERN_WARNING "%s: firmware_register error: %d\n",
746 __FUNCTION__, result);
742 747
743 result = acpi_bus_init(); 748 result = acpi_bus_init();
744 749
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index 32c9d88fd196..1ba2db671865 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -91,6 +91,14 @@ enum {
91 HK_EVENT_ENTERRING_S5, 91 HK_EVENT_ENTERRING_S5,
92}; 92};
93 93
94enum conf_entry_enum {
95 bus_handle = 0,
96 bus_method = 1,
97 action_handle = 2,
98 method = 3,
99 LAST_CONF_ENTRY
100};
101
94/* procdir we use */ 102/* procdir we use */
95static struct proc_dir_entry *hotkey_proc_dir; 103static struct proc_dir_entry *hotkey_proc_dir;
96static struct proc_dir_entry *hotkey_config; 104static struct proc_dir_entry *hotkey_config;
@@ -244,19 +252,15 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file)
244 252
245static char *format_result(union acpi_object *object) 253static char *format_result(union acpi_object *object)
246{ 254{
247 char *buf = NULL; 255 char *buf;
248
249 buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL);
250 if (buf)
251 memset(buf, 0, RESULT_STR_LEN);
252 else
253 goto do_fail;
254 256
257 buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL);
258 if (!buf)
259 return NULL;
255 /* Now, just support integer type */ 260 /* Now, just support integer type */
256 if (object->type == ACPI_TYPE_INTEGER) 261 if (object->type == ACPI_TYPE_INTEGER)
257 sprintf(buf, "%d\n", (u32) object->integer.value); 262 sprintf(buf, "%d\n", (u32) object->integer.value);
258 do_fail: 263 return buf;
259 return (buf);
260} 264}
261 265
262static int hotkey_polling_seq_show(struct seq_file *seq, void *offset) 266static int hotkey_polling_seq_show(struct seq_file *seq, void *offset)
@@ -486,98 +490,102 @@ static void free_hotkey_device(union acpi_hotkey *key)
486 490
487static void free_hotkey_buffer(union acpi_hotkey *key) 491static void free_hotkey_buffer(union acpi_hotkey *key)
488{ 492{
493 /* key would never be null, action method could be */
489 kfree(key->event_hotkey.action_method); 494 kfree(key->event_hotkey.action_method);
490} 495}
491 496
492static void free_poll_hotkey_buffer(union acpi_hotkey *key) 497static void free_poll_hotkey_buffer(union acpi_hotkey *key)
493{ 498{
499 /* key would never be null, others could be*/
494 kfree(key->poll_hotkey.action_method); 500 kfree(key->poll_hotkey.action_method);
495 kfree(key->poll_hotkey.poll_method); 501 kfree(key->poll_hotkey.poll_method);
496 kfree(key->poll_hotkey.poll_result); 502 kfree(key->poll_hotkey.poll_result);
497} 503}
498static int 504static int
499init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str, 505init_hotkey_device(union acpi_hotkey *key, char **config_entry,
500 char *method, int std_num, int external_num) 506 int std_num, int external_num)
501{ 507{
502 acpi_handle tmp_handle; 508 acpi_handle tmp_handle;
503 acpi_status status = AE_OK; 509 acpi_status status = AE_OK;
504 510
505
506 if (std_num < 0 || IS_POLL(std_num) || !key) 511 if (std_num < 0 || IS_POLL(std_num) || !key)
507 goto do_fail; 512 goto do_fail;
508 513
509 if (!bus_str || !action_str || !method) 514 if (!config_entry[bus_handle] || !config_entry[action_handle]
515 || !config_entry[method])
510 goto do_fail; 516 goto do_fail;
511 517
512 key->link.hotkey_type = ACPI_HOTKEY_EVENT; 518 key->link.hotkey_type = ACPI_HOTKEY_EVENT;
513 key->link.hotkey_standard_num = std_num; 519 key->link.hotkey_standard_num = std_num;
514 key->event_hotkey.flag = 0; 520 key->event_hotkey.flag = 0;
515 key->event_hotkey.action_method = method; 521 key->event_hotkey.action_method = config_entry[method];
516 522
517 status = 523 status = acpi_get_handle(NULL, config_entry[bus_handle],
518 acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle)); 524 &(key->event_hotkey.bus_handle));
519 if (ACPI_FAILURE(status)) 525 if (ACPI_FAILURE(status))
520 goto do_fail; 526 goto do_fail_zero;
521 key->event_hotkey.external_hotkey_num = external_num; 527 key->event_hotkey.external_hotkey_num = external_num;
522 status = 528 status = acpi_get_handle(NULL, config_entry[action_handle],
523 acpi_get_handle(NULL, action_str,
524 &(key->event_hotkey.action_handle)); 529 &(key->event_hotkey.action_handle));
525 if (ACPI_FAILURE(status)) 530 if (ACPI_FAILURE(status))
526 goto do_fail; 531 goto do_fail_zero;
527 status = acpi_get_handle(key->event_hotkey.action_handle, 532 status = acpi_get_handle(key->event_hotkey.action_handle,
528 method, &tmp_handle); 533 config_entry[method], &tmp_handle);
529 if (ACPI_FAILURE(status)) 534 if (ACPI_FAILURE(status))
530 goto do_fail; 535 goto do_fail_zero;
531 return AE_OK; 536 return AE_OK;
532 do_fail: 537do_fail_zero:
538 key->event_hotkey.action_method = NULL;
539do_fail:
533 return -ENODEV; 540 return -ENODEV;
534} 541}
535 542
536static int 543static int
537init_poll_hotkey_device(union acpi_hotkey *key, 544init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry,
538 char *poll_str, 545 int std_num)
539 char *poll_method,
540 char *action_str, char *action_method, int std_num)
541{ 546{
542 acpi_status status = AE_OK; 547 acpi_status status = AE_OK;
543 acpi_handle tmp_handle; 548 acpi_handle tmp_handle;
544 549
545
546 if (std_num < 0 || IS_EVENT(std_num) || !key) 550 if (std_num < 0 || IS_EVENT(std_num) || !key)
547 goto do_fail; 551 goto do_fail;
548 552 if (!config_entry[bus_handle] ||!config_entry[bus_method] ||
549 if (!poll_str || !poll_method || !action_str || !action_method) 553 !config_entry[action_handle] || !config_entry[method])
550 goto do_fail; 554 goto do_fail;
551 555
552 key->link.hotkey_type = ACPI_HOTKEY_POLLING; 556 key->link.hotkey_type = ACPI_HOTKEY_POLLING;
553 key->link.hotkey_standard_num = std_num; 557 key->link.hotkey_standard_num = std_num;
554 key->poll_hotkey.flag = 0; 558 key->poll_hotkey.flag = 0;
555 key->poll_hotkey.poll_method = poll_method; 559 key->poll_hotkey.poll_method = config_entry[bus_method];
556 key->poll_hotkey.action_method = action_method; 560 key->poll_hotkey.action_method = config_entry[method];
557 561
558 status = 562 status = acpi_get_handle(NULL, config_entry[bus_handle],
559 acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle)); 563 &(key->poll_hotkey.poll_handle));
560 if (ACPI_FAILURE(status)) 564 if (ACPI_FAILURE(status))
561 goto do_fail; 565 goto do_fail_zero;
562 status = acpi_get_handle(key->poll_hotkey.poll_handle, 566 status = acpi_get_handle(key->poll_hotkey.poll_handle,
563 poll_method, &tmp_handle); 567 config_entry[bus_method], &tmp_handle);
564 if (ACPI_FAILURE(status)) 568 if (ACPI_FAILURE(status))
565 goto do_fail; 569 goto do_fail_zero;
566 status = 570 status =
567 acpi_get_handle(NULL, action_str, 571 acpi_get_handle(NULL, config_entry[action_handle],
568 &(key->poll_hotkey.action_handle)); 572 &(key->poll_hotkey.action_handle));
569 if (ACPI_FAILURE(status)) 573 if (ACPI_FAILURE(status))
570 goto do_fail; 574 goto do_fail_zero;
571 status = acpi_get_handle(key->poll_hotkey.action_handle, 575 status = acpi_get_handle(key->poll_hotkey.action_handle,
572 action_method, &tmp_handle); 576 config_entry[method], &tmp_handle);
573 if (ACPI_FAILURE(status)) 577 if (ACPI_FAILURE(status))
574 goto do_fail; 578 goto do_fail_zero;
575 key->poll_hotkey.poll_result = 579 key->poll_hotkey.poll_result =
576 (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL); 580 (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL);
577 if (!key->poll_hotkey.poll_result) 581 if (!key->poll_hotkey.poll_result)
578 goto do_fail; 582 goto do_fail_zero;
579 return AE_OK; 583 return AE_OK;
580 do_fail: 584
585do_fail_zero:
586 key->poll_hotkey.poll_method = NULL;
587 key->poll_hotkey.action_method = NULL;
588do_fail:
581 return -ENODEV; 589 return -ENODEV;
582} 590}
583 591
@@ -652,17 +660,18 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset)
652} 660}
653 661
654static int 662static int
655get_parms(char *config_record, 663get_parms(char *config_record, int *cmd, char **config_entry,
656 int *cmd, 664 int *internal_event_num, int *external_event_num)
657 char **bus_handle,
658 char **bus_method,
659 char **action_handle,
660 char **method, int *internal_event_num, int *external_event_num)
661{ 665{
666/* the format of *config_record =
667 * "1:\d+:*" : "cmd:internal_event_num"
668 * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" :
669 * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"
670 */
662 char *tmp, *tmp1, count; 671 char *tmp, *tmp1, count;
672 int i;
663 673
664 sscanf(config_record, "%d", cmd); 674 sscanf(config_record, "%d", cmd);
665
666 if (*cmd == 1) { 675 if (*cmd == 1) {
667 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) != 676 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) !=
668 2) 677 2)
@@ -674,59 +683,27 @@ get_parms(char *config_record,
674 if (!tmp) 683 if (!tmp)
675 goto do_fail; 684 goto do_fail;
676 tmp++; 685 tmp++;
677 tmp1 = strchr(tmp, ':'); 686 for (i = 0; i < LAST_CONF_ENTRY; i++) {
678 if (!tmp1) 687 tmp1 = strchr(tmp, ':');
679 goto do_fail; 688 if (!tmp1) {
680 689 goto do_fail;
681 count = tmp1 - tmp; 690 }
682 *bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL); 691 count = tmp1 - tmp;
683 if (!*bus_handle) 692 config_entry[i] = kzalloc(count + 1, GFP_KERNEL);
684 goto do_fail; 693 if (!config_entry[i])
685 strncpy(*bus_handle, tmp, count); 694 goto handle_failure;
686 *(*bus_handle + count) = 0; 695 strncpy(config_entry[i], tmp, count);
687 696 tmp = tmp1 + 1;
688 tmp = tmp1; 697 }
689 tmp++; 698 if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0)
690 tmp1 = strchr(tmp, ':'); 699 goto handle_failure;
691 if (!tmp1) 700 if (!IS_OTHERS(*internal_event_num)) {
692 goto do_fail; 701 return 6;
693 count = tmp1 - tmp; 702 }
694 *bus_method = (char *)kmalloc(count + 1, GFP_KERNEL); 703handle_failure:
695 if (!*bus_method) 704 while (i-- > 0)
696 goto do_fail; 705 kfree(config_entry[i]);
697 strncpy(*bus_method, tmp, count); 706do_fail:
698 *(*bus_method + count) = 0;
699
700 tmp = tmp1;
701 tmp++;
702 tmp1 = strchr(tmp, ':');
703 if (!tmp1)
704 goto do_fail;
705 count = tmp1 - tmp;
706 *action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
707 if (!*action_handle)
708 goto do_fail;
709 strncpy(*action_handle, tmp, count);
710 *(*action_handle + count) = 0;
711
712 tmp = tmp1;
713 tmp++;
714 tmp1 = strchr(tmp, ':');
715 if (!tmp1)
716 goto do_fail;
717 count = tmp1 - tmp;
718 *method = (char *)kmalloc(count + 1, GFP_KERNEL);
719 if (!*method)
720 goto do_fail;
721 strncpy(*method, tmp, count);
722 *(*method + count) = 0;
723
724 if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <=
725 0)
726 goto do_fail;
727
728 return 6;
729 do_fail:
730 return -1; 707 return -1;
731} 708}
732 709
@@ -736,50 +713,34 @@ static ssize_t hotkey_write_config(struct file *file,
736 size_t count, loff_t * data) 713 size_t count, loff_t * data)
737{ 714{
738 char *config_record = NULL; 715 char *config_record = NULL;
739 char *bus_handle = NULL; 716 char *config_entry[LAST_CONF_ENTRY];
740 char *bus_method = NULL;
741 char *action_handle = NULL;
742 char *method = NULL;
743 int cmd, internal_event_num, external_event_num; 717 int cmd, internal_event_num, external_event_num;
744 int ret = 0; 718 int ret = 0;
745 union acpi_hotkey *key = NULL; 719 union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
746 720
721 if (!key)
722 return -ENOMEM;
747 723
748 config_record = (char *)kmalloc(count + 1, GFP_KERNEL); 724 config_record = kzalloc(count + 1, GFP_KERNEL);
749 if (!config_record) 725 if (!config_record) {
726 kfree(key);
750 return -ENOMEM; 727 return -ENOMEM;
728 }
751 729
752 if (copy_from_user(config_record, buffer, count)) { 730 if (copy_from_user(config_record, buffer, count)) {
753 kfree(config_record); 731 kfree(config_record);
732 kfree(key);
754 printk(KERN_ERR PREFIX "Invalid data\n"); 733 printk(KERN_ERR PREFIX "Invalid data\n");
755 return -EINVAL; 734 return -EINVAL;
756 } 735 }
757 config_record[count] = 0; 736 ret = get_parms(config_record, &cmd, config_entry,
758 737 &internal_event_num, &external_event_num);
759 ret = get_parms(config_record,
760 &cmd,
761 &bus_handle,
762 &bus_method,
763 &action_handle,
764 &method, &internal_event_num, &external_event_num);
765
766 kfree(config_record); 738 kfree(config_record);
767 if (IS_OTHERS(internal_event_num))
768 goto do_fail;
769 if (ret != 6) { 739 if (ret != 6) {
770 do_fail:
771 kfree(bus_handle);
772 kfree(bus_method);
773 kfree(action_handle);
774 kfree(method);
775 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret); 740 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret);
776 return -EINVAL; 741 return -EINVAL;
777 } 742 }
778 743
779 key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
780 if (!key)
781 goto do_fail;
782 memset(key, 0, sizeof(union acpi_hotkey));
783 if (cmd == 1) { 744 if (cmd == 1) {
784 union acpi_hotkey *tmp = NULL; 745 union acpi_hotkey *tmp = NULL;
785 tmp = get_hotkey_by_event(&global_hotkey_list, 746 tmp = get_hotkey_by_event(&global_hotkey_list,
@@ -791,34 +752,19 @@ static ssize_t hotkey_write_config(struct file *file,
791 goto cont_cmd; 752 goto cont_cmd;
792 } 753 }
793 if (IS_EVENT(internal_event_num)) { 754 if (IS_EVENT(internal_event_num)) {
794 kfree(bus_method); 755 if (init_hotkey_device(key, config_entry,
795 ret = init_hotkey_device(key, bus_handle, action_handle, method, 756 internal_event_num, external_event_num))
796 internal_event_num, 757 goto init_hotkey_fail;
797 external_event_num); 758 } else {
798 } else 759 if (init_poll_hotkey_device(key, config_entry,
799 ret = init_poll_hotkey_device(key, bus_handle, bus_method, 760 internal_event_num))
800 action_handle, method, 761 goto init_poll_hotkey_fail;
801 internal_event_num);
802 if (ret) {
803 kfree(bus_handle);
804 kfree(action_handle);
805 if (IS_EVENT(internal_event_num))
806 free_hotkey_buffer(key);
807 else
808 free_poll_hotkey_buffer(key);
809 kfree(key);
810 printk(KERN_ERR PREFIX "Invalid hotkey\n");
811 return -EINVAL;
812 } 762 }
813 763cont_cmd:
814 cont_cmd:
815 kfree(bus_handle);
816 kfree(action_handle);
817
818 switch (cmd) { 764 switch (cmd) {
819 case 0: 765 case 0:
820 if (get_hotkey_by_event 766 if (get_hotkey_by_event(&global_hotkey_list,
821 (&global_hotkey_list, key->link.hotkey_standard_num)) 767 key->link.hotkey_standard_num))
822 goto fail_out; 768 goto fail_out;
823 else 769 else
824 hotkey_add(key); 770 hotkey_add(key);
@@ -827,6 +773,7 @@ static ssize_t hotkey_write_config(struct file *file,
827 hotkey_remove(key); 773 hotkey_remove(key);
828 break; 774 break;
829 case 2: 775 case 2:
776 /* key is kfree()ed if matched*/
830 if (hotkey_update(key)) 777 if (hotkey_update(key))
831 goto fail_out; 778 goto fail_out;
832 break; 779 break;
@@ -835,11 +782,22 @@ static ssize_t hotkey_write_config(struct file *file,
835 break; 782 break;
836 } 783 }
837 return count; 784 return count;
838 fail_out: 785
839 if (IS_EVENT(internal_event_num)) 786init_poll_hotkey_fail: /* failed init_poll_hotkey_device */
840 free_hotkey_buffer(key); 787 kfree(config_entry[bus_method]);
841 else 788 config_entry[bus_method] = NULL;
842 free_poll_hotkey_buffer(key); 789init_hotkey_fail: /* failed init_hotkey_device */
790 kfree(config_entry[method]);
791fail_out:
792 kfree(config_entry[bus_handle]);
793 kfree(config_entry[action_handle]);
794 /* No double free since elements =NULL for error cases */
795 if (IS_EVENT(internal_event_num)) {
796 if (config_entry[bus_method])
797 kfree(config_entry[bus_method]);
798 free_hotkey_buffer(key); /* frees [method] */
799 } else
800 free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */
843 kfree(key); 801 kfree(key);
844 printk(KERN_ERR PREFIX "invalid key\n"); 802 printk(KERN_ERR PREFIX "invalid key\n");
845 return -EINVAL; 803 return -EINVAL;
@@ -923,10 +881,9 @@ static ssize_t hotkey_execute_aml_method(struct file *file,
923 union acpi_hotkey *key; 881 union acpi_hotkey *key;
924 882
925 883
926 arg = (char *)kmalloc(count + 1, GFP_KERNEL); 884 arg = kzalloc(count + 1, GFP_KERNEL);
927 if (!arg) 885 if (!arg)
928 return -ENOMEM; 886 return -ENOMEM;
929 arg[count] = 0;
930 887
931 if (copy_from_user(arg, buffer, count)) { 888 if (copy_from_user(arg, buffer, count)) {
932 kfree(arg); 889 kfree(arg);
diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c
index 84239d51dc0c..6809c283ec58 100644
--- a/drivers/acpi/i2c_ec.c
+++ b/drivers/acpi/i2c_ec.c
@@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device)
330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val); 330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val);
331 if (ACPI_FAILURE(status)) { 331 if (ACPI_FAILURE(status)) {
332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n")); 332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n"));
333 kfree(ec_hc->smbus); 333 kfree(ec_hc);
334 kfree(smbus); 334 kfree(smbus);
335 return -EIO; 335 return -EIO;
336 } 336 }
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b7d1514cd199..507f051d1cef 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -746,6 +746,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
746 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 746 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
747 handle, units, timeout)); 747 handle, units, timeout));
748 748
749 /*
750 * This can be called during resume with interrupts off.
751 * Like boot-time, we should be single threaded and will
752 * always get the lock if we try -- timeout or not.
753 * If this doesn't succeed, then we will oops courtesy of
754 * might_sleep() in down().
755 */
756 if (!down_trylock(sem))
757 return AE_OK;
758
749 switch (timeout) { 759 switch (timeout) {
750 /* 760 /*
751 * No Wait: 761 * No Wait:
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index db7b350a5035..62bef0b3b614 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -1714,6 +1714,9 @@ static int __init acpi_sbs_init(void)
1714{ 1714{
1715 int result = 0; 1715 int result = 0;
1716 1716
1717 if (acpi_disabled)
1718 return -ENODEV;
1719
1717 init_MUTEX(&sbs_sem); 1720 init_MUTEX(&sbs_sem);
1718 1721
1719 if (capacity_mode != DEF_CAPACITY_UNIT 1722 if (capacity_mode != DEF_CAPACITY_UNIT
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5fcb50c7b778..698a1540e303 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -4,6 +4,7 @@
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/kernel.h>
7#include <linux/acpi.h> 8#include <linux/acpi.h>
8 9
9#include <acpi/acpi_drivers.h> 10#include <acpi/acpi_drivers.h>
@@ -113,6 +114,8 @@ static struct kset acpi_namespace_kset = {
113static void acpi_device_register(struct acpi_device *device, 114static void acpi_device_register(struct acpi_device *device,
114 struct acpi_device *parent) 115 struct acpi_device *parent)
115{ 116{
117 int err;
118
116 /* 119 /*
117 * Linkage 120 * Linkage
118 * ------- 121 * -------
@@ -138,7 +141,10 @@ static void acpi_device_register(struct acpi_device *device,
138 device->kobj.parent = &parent->kobj; 141 device->kobj.parent = &parent->kobj;
139 device->kobj.ktype = &ktype_acpi_ns; 142 device->kobj.ktype = &ktype_acpi_ns;
140 device->kobj.kset = &acpi_namespace_kset; 143 device->kobj.kset = &acpi_namespace_kset;
141 kobject_register(&device->kobj); 144 err = kobject_register(&device->kobj);
145 if (err < 0)
146 printk(KERN_WARNING "%s: kobject_register error: %d\n",
147 __FUNCTION__, err);
142 create_sysfs_device_files(device); 148 create_sysfs_device_files(device);
143} 149}
144 150
@@ -1450,7 +1456,9 @@ static int __init acpi_scan_init(void)
1450 if (acpi_disabled) 1456 if (acpi_disabled)
1451 return 0; 1457 return 0;
1452 1458
1453 kset_register(&acpi_namespace_kset); 1459 result = kset_register(&acpi_namespace_kset);
1460 if (result < 0)
1461 printk(KERN_ERR PREFIX "kset_register error: %d\n", result);
1454 1462
1455 result = bus_register(&acpi_bus_type); 1463 result = bus_register(&acpi_bus_type);
1456 if (result) { 1464 if (result) {
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f48227f4c8c9..d0d84c43a9d4 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle,
262 if (!data) 262 if (!data)
263 return AE_BAD_PARAMETER; 263 return AE_BAD_PARAMETER;
264 264
265 element = kmalloc(sizeof(union acpi_object), GFP_KERNEL); 265 element = kmalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
266 if (!element) 266 if (!element)
267 return AE_NO_MEMORY; 267 return AE_NO_MEMORY;
268 268
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d7de1753e094..e9b0957f15d1 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
64 "Node %d Mapped: %8lu kB\n" 64 "Node %d Mapped: %8lu kB\n"
65 "Node %d AnonPages: %8lu kB\n" 65 "Node %d AnonPages: %8lu kB\n"
66 "Node %d PageTables: %8lu kB\n" 66 "Node %d PageTables: %8lu kB\n"
67 "Node %d NFS Unstable: %8lu kB\n" 67 "Node %d NFS_Unstable: %8lu kB\n"
68 "Node %d Bounce: %8lu kB\n" 68 "Node %d Bounce: %8lu kB\n"
69 "Node %d Slab: %8lu kB\n", 69 "Node %d Slab: %8lu kB\n",
70 nid, K(i.totalram), 70 nid, K(i.totalram),
diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c
index b6ee50a2916d..fa7082489765 100644
--- a/drivers/cdrom/gscd.c
+++ b/drivers/cdrom/gscd.c
@@ -266,7 +266,7 @@ repeat:
266 goto out; 266 goto out;
267 267
268 if (req->cmd != READ) { 268 if (req->cmd != READ) {
269 printk("GSCD: bad cmd %lu\n", rq_data_dir(req)); 269 printk("GSCD: bad cmd %u\n", rq_data_dir(req));
270 end_request(req, 0); 270 end_request(req, 0);
271 goto repeat; 271 goto repeat;
272 } 272 }
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4ea7bd5f4f56..a369dd6877d8 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -142,6 +142,7 @@ typedef struct _moxa_board_conf {
142 142
143static moxa_board_conf moxa_boards[MAX_BOARDS]; 143static moxa_board_conf moxa_boards[MAX_BOARDS];
144static void __iomem *moxaBaseAddr[MAX_BOARDS]; 144static void __iomem *moxaBaseAddr[MAX_BOARDS];
145static int loadstat[MAX_BOARDS];
145 146
146struct moxa_str { 147struct moxa_str {
147 int type; 148 int type;
@@ -1688,6 +1689,8 @@ int MoxaDriverPoll(void)
1688 if (moxaCard == 0) 1689 if (moxaCard == 0)
1689 return (-1); 1690 return (-1);
1690 for (card = 0; card < MAX_BOARDS; card++) { 1691 for (card = 0; card < MAX_BOARDS; card++) {
1692 if (loadstat[card] == 0)
1693 continue;
1691 if ((ports = moxa_boards[card].numPorts) == 0) 1694 if ((ports = moxa_boards[card].numPorts) == 0)
1692 continue; 1695 continue;
1693 if (readb(moxaIntPend[card]) == 0xff) { 1696 if (readb(moxaIntPend[card]) == 0xff) {
@@ -2903,6 +2906,7 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
2903 } 2906 }
2904 break; 2907 break;
2905 } 2908 }
2909 loadstat[cardno] = 1;
2906 return (0); 2910 return (0);
2907} 2911}
2908 2912
@@ -2920,7 +2924,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2920 len1 = len >> 1; 2924 len1 = len >> 1;
2921 ptr = (ushort *) moxaBuff; 2925 ptr = (ushort *) moxaBuff;
2922 for (i = 0; i < len1; i++) 2926 for (i = 0; i < len1; i++)
2923 usum += *(ptr + i); 2927 usum += le16_to_cpu(*(ptr + i));
2924 retry = 0; 2928 retry = 0;
2925 do { 2929 do {
2926 len1 = len >> 1; 2930 len1 = len >> 1;
@@ -2992,7 +2996,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2992 wlen = len >> 1; 2996 wlen = len >> 1;
2993 uptr = (ushort *) moxaBuff; 2997 uptr = (ushort *) moxaBuff;
2994 for (i = 0; i < wlen; i++) 2998 for (i = 0; i < wlen; i++)
2995 usum += uptr[i]; 2999 usum += le16_to_cpu(uptr[i]);
2996 retry = 0; 3000 retry = 0;
2997 j = 0; 3001 j = 0;
2998 do { 3002 do {
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index bfdb90242a90..bb0d9199e994 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -153,6 +153,15 @@ int tty_ioctl(struct inode * inode, struct file * file,
153static int tty_fasync(int fd, struct file * filp, int on); 153static int tty_fasync(int fd, struct file * filp, int on);
154static void release_mem(struct tty_struct *tty, int idx); 154static void release_mem(struct tty_struct *tty, int idx);
155 155
156/**
157 * alloc_tty_struct - allocate a tty object
158 *
159 * Return a new empty tty structure. The data fields have not
160 * been initialized in any way but has been zeroed
161 *
162 * Locking: none
163 * FIXME: use kzalloc
164 */
156 165
157static struct tty_struct *alloc_tty_struct(void) 166static struct tty_struct *alloc_tty_struct(void)
158{ 167{
@@ -166,6 +175,15 @@ static struct tty_struct *alloc_tty_struct(void)
166 175
167static void tty_buffer_free_all(struct tty_struct *); 176static void tty_buffer_free_all(struct tty_struct *);
168 177
178/**
179 * free_tty_struct - free a disused tty
180 * @tty: tty struct to free
181 *
182 * Free the write buffers, tty queue and tty memory itself.
183 *
184 * Locking: none. Must be called after tty is definitely unused
185 */
186
169static inline void free_tty_struct(struct tty_struct *tty) 187static inline void free_tty_struct(struct tty_struct *tty)
170{ 188{
171 kfree(tty->write_buf); 189 kfree(tty->write_buf);
@@ -175,6 +193,17 @@ static inline void free_tty_struct(struct tty_struct *tty)
175 193
176#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) 194#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
177 195
196/**
197 * tty_name - return tty naming
198 * @tty: tty structure
199 * @buf: buffer for output
200 *
201 * Convert a tty structure into a name. The name reflects the kernel
202 * naming policy and if udev is in use may not reflect user space
203 *
204 * Locking: none
205 */
206
178char *tty_name(struct tty_struct *tty, char *buf) 207char *tty_name(struct tty_struct *tty, char *buf)
179{ 208{
180 if (!tty) /* Hmm. NULL pointer. That's fun. */ 209 if (!tty) /* Hmm. NULL pointer. That's fun. */
@@ -235,6 +264,28 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
235 * Tty buffer allocation management 264 * Tty buffer allocation management
236 */ 265 */
237 266
267
268/**
269 * tty_buffer_free_all - free buffers used by a tty
270 * @tty: tty to free from
271 *
272 * Remove all the buffers pending on a tty whether queued with data
273 * or in the free ring. Must be called when the tty is no longer in use
274 *
275 * Locking: none
276 */
277
278
279/**
280 * tty_buffer_free_all - free buffers used by a tty
281 * @tty: tty to free from
282 *
283 * Remove all the buffers pending on a tty whether queued with data
284 * or in the free ring. Must be called when the tty is no longer in use
285 *
286 * Locking: none
287 */
288
238static void tty_buffer_free_all(struct tty_struct *tty) 289static void tty_buffer_free_all(struct tty_struct *tty)
239{ 290{
240 struct tty_buffer *thead; 291 struct tty_buffer *thead;
@@ -247,19 +298,47 @@ static void tty_buffer_free_all(struct tty_struct *tty)
247 kfree(thead); 298 kfree(thead);
248 } 299 }
249 tty->buf.tail = NULL; 300 tty->buf.tail = NULL;
301 tty->buf.memory_used = 0;
250} 302}
251 303
304/**
305 * tty_buffer_init - prepare a tty buffer structure
306 * @tty: tty to initialise
307 *
308 * Set up the initial state of the buffer management for a tty device.
309 * Must be called before the other tty buffer functions are used.
310 *
311 * Locking: none
312 */
313
252static void tty_buffer_init(struct tty_struct *tty) 314static void tty_buffer_init(struct tty_struct *tty)
253{ 315{
254 spin_lock_init(&tty->buf.lock); 316 spin_lock_init(&tty->buf.lock);
255 tty->buf.head = NULL; 317 tty->buf.head = NULL;
256 tty->buf.tail = NULL; 318 tty->buf.tail = NULL;
257 tty->buf.free = NULL; 319 tty->buf.free = NULL;
320 tty->buf.memory_used = 0;
258} 321}
259 322
260static struct tty_buffer *tty_buffer_alloc(size_t size) 323/**
324 * tty_buffer_alloc - allocate a tty buffer
325 * @tty: tty device
326 * @size: desired size (characters)
327 *
328 * Allocate a new tty buffer to hold the desired number of characters.
329 * Return NULL if out of memory or the allocation would exceed the
330 * per device queue
331 *
332 * Locking: Caller must hold tty->buf.lock
333 */
334
335static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
261{ 336{
262 struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); 337 struct tty_buffer *p;
338
339 if (tty->buf.memory_used + size > 65536)
340 return NULL;
341 p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
263 if(p == NULL) 342 if(p == NULL)
264 return NULL; 343 return NULL;
265 p->used = 0; 344 p->used = 0;
@@ -269,17 +348,27 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
269 p->read = 0; 348 p->read = 0;
270 p->char_buf_ptr = (char *)(p->data); 349 p->char_buf_ptr = (char *)(p->data);
271 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; 350 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
272/* printk("Flip create %p\n", p); */ 351 tty->buf.memory_used += size;
273 return p; 352 return p;
274} 353}
275 354
276/* Must be called with the tty_read lock held. This needs to acquire strategy 355/**
277 code to decide if we should kfree or relink a given expired buffer */ 356 * tty_buffer_free - free a tty buffer
357 * @tty: tty owning the buffer
358 * @b: the buffer to free
359 *
360 * Free a tty buffer, or add it to the free list according to our
361 * internal strategy
362 *
363 * Locking: Caller must hold tty->buf.lock
364 */
278 365
279static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) 366static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
280{ 367{
281 /* Dumb strategy for now - should keep some stats */ 368 /* Dumb strategy for now - should keep some stats */
282/* printk("Flip dispose %p\n", b); */ 369 tty->buf.memory_used -= b->size;
370 WARN_ON(tty->buf.memory_used < 0);
371
283 if(b->size >= 512) 372 if(b->size >= 512)
284 kfree(b); 373 kfree(b);
285 else { 374 else {
@@ -288,6 +377,18 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
288 } 377 }
289} 378}
290 379
380/**
381 * tty_buffer_find - find a free tty buffer
382 * @tty: tty owning the buffer
383 * @size: characters wanted
384 *
385 * Locate an existing suitable tty buffer or if we are lacking one then
386 * allocate a new one. We round our buffers off in 256 character chunks
387 * to get better allocation behaviour.
388 *
389 * Locking: Caller must hold tty->buf.lock
390 */
391
291static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) 392static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
292{ 393{
293 struct tty_buffer **tbh = &tty->buf.free; 394 struct tty_buffer **tbh = &tty->buf.free;
@@ -299,20 +400,28 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
299 t->used = 0; 400 t->used = 0;
300 t->commit = 0; 401 t->commit = 0;
301 t->read = 0; 402 t->read = 0;
302 /* DEBUG ONLY */ 403 tty->buf.memory_used += t->size;
303/* memset(t->data, '*', size); */
304/* printk("Flip recycle %p\n", t); */
305 return t; 404 return t;
306 } 405 }
307 tbh = &((*tbh)->next); 406 tbh = &((*tbh)->next);
308 } 407 }
309 /* Round the buffer size out */ 408 /* Round the buffer size out */
310 size = (size + 0xFF) & ~ 0xFF; 409 size = (size + 0xFF) & ~ 0xFF;
311 return tty_buffer_alloc(size); 410 return tty_buffer_alloc(tty, size);
312 /* Should possibly check if this fails for the largest buffer we 411 /* Should possibly check if this fails for the largest buffer we
313 have queued and recycle that ? */ 412 have queued and recycle that ? */
314} 413}
315 414
415/**
416 * tty_buffer_request_room - grow tty buffer if needed
417 * @tty: tty structure
418 * @size: size desired
419 *
420 * Make at least size bytes of linear space available for the tty
421 * buffer. If we fail return the size we managed to find.
422 *
423 * Locking: Takes tty->buf.lock
424 */
316int tty_buffer_request_room(struct tty_struct *tty, size_t size) 425int tty_buffer_request_room(struct tty_struct *tty, size_t size)
317{ 426{
318 struct tty_buffer *b, *n; 427 struct tty_buffer *b, *n;
@@ -347,6 +456,18 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
347} 456}
348EXPORT_SYMBOL_GPL(tty_buffer_request_room); 457EXPORT_SYMBOL_GPL(tty_buffer_request_room);
349 458
459/**
460 * tty_insert_flip_string - Add characters to the tty buffer
461 * @tty: tty structure
462 * @chars: characters
463 * @size: size
464 *
465 * Queue a series of bytes to the tty buffering. All the characters
466 * passed are marked as without error. Returns the number added.
467 *
468 * Locking: Called functions may take tty->buf.lock
469 */
470
350int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, 471int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
351 size_t size) 472 size_t size)
352{ 473{
@@ -370,6 +491,20 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
370} 491}
371EXPORT_SYMBOL(tty_insert_flip_string); 492EXPORT_SYMBOL(tty_insert_flip_string);
372 493
494/**
495 * tty_insert_flip_string_flags - Add characters to the tty buffer
496 * @tty: tty structure
497 * @chars: characters
498 * @flags: flag bytes
499 * @size: size
500 *
501 * Queue a series of bytes to the tty buffering. For each character
502 * the flags array indicates the status of the character. Returns the
503 * number added.
504 *
505 * Locking: Called functions may take tty->buf.lock
506 */
507
373int tty_insert_flip_string_flags(struct tty_struct *tty, 508int tty_insert_flip_string_flags(struct tty_struct *tty,
374 const unsigned char *chars, const char *flags, size_t size) 509 const unsigned char *chars, const char *flags, size_t size)
375{ 510{
@@ -394,6 +529,17 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
394} 529}
395EXPORT_SYMBOL(tty_insert_flip_string_flags); 530EXPORT_SYMBOL(tty_insert_flip_string_flags);
396 531
532/**
533 * tty_schedule_flip - push characters to ldisc
534 * @tty: tty to push from
535 *
536 * Takes any pending buffers and transfers their ownership to the
537 * ldisc side of the queue. It then schedules those characters for
538 * processing by the line discipline.
539 *
540 * Locking: Takes tty->buf.lock
541 */
542
397void tty_schedule_flip(struct tty_struct *tty) 543void tty_schedule_flip(struct tty_struct *tty)
398{ 544{
399 unsigned long flags; 545 unsigned long flags;
@@ -405,12 +551,19 @@ void tty_schedule_flip(struct tty_struct *tty)
405} 551}
406EXPORT_SYMBOL(tty_schedule_flip); 552EXPORT_SYMBOL(tty_schedule_flip);
407 553
408/* 554/**
555 * tty_prepare_flip_string - make room for characters
556 * @tty: tty
557 * @chars: return pointer for character write area
558 * @size: desired size
559 *
409 * Prepare a block of space in the buffer for data. Returns the length 560 * Prepare a block of space in the buffer for data. Returns the length
410 * available and buffer pointer to the space which is now allocated and 561 * available and buffer pointer to the space which is now allocated and
411 * accounted for as ready for normal characters. This is used for drivers 562 * accounted for as ready for normal characters. This is used for drivers
412 * that need their own block copy routines into the buffer. There is no 563 * that need their own block copy routines into the buffer. There is no
413 * guarantee the buffer is a DMA target! 564 * guarantee the buffer is a DMA target!
565 *
566 * Locking: May call functions taking tty->buf.lock
414 */ 567 */
415 568
416int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) 569int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
@@ -427,12 +580,20 @@ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_
427 580
428EXPORT_SYMBOL_GPL(tty_prepare_flip_string); 581EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
429 582
430/* 583/**
584 * tty_prepare_flip_string_flags - make room for characters
585 * @tty: tty
586 * @chars: return pointer for character write area
587 * @flags: return pointer for status flag write area
588 * @size: desired size
589 *
431 * Prepare a block of space in the buffer for data. Returns the length 590 * Prepare a block of space in the buffer for data. Returns the length
432 * available and buffer pointer to the space which is now allocated and 591 * available and buffer pointer to the space which is now allocated and
433 * accounted for as ready for characters. This is used for drivers 592 * accounted for as ready for characters. This is used for drivers
434 * that need their own block copy routines into the buffer. There is no 593 * that need their own block copy routines into the buffer. There is no
435 * guarantee the buffer is a DMA target! 594 * guarantee the buffer is a DMA target!
595 *
596 * Locking: May call functions taking tty->buf.lock
436 */ 597 */
437 598
438int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) 599int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
@@ -451,10 +612,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
451 612
452 613
453 614
454/* 615/**
616 * tty_set_termios_ldisc - set ldisc field
617 * @tty: tty structure
618 * @num: line discipline number
619 *
455 * This is probably overkill for real world processors but 620 * This is probably overkill for real world processors but
456 * they are not on hot paths so a little discipline won't do 621 * they are not on hot paths so a little discipline won't do
457 * any harm. 622 * any harm.
623 *
624 * Locking: takes termios_sem
458 */ 625 */
459 626
460static void tty_set_termios_ldisc(struct tty_struct *tty, int num) 627static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
@@ -474,6 +641,19 @@ static DEFINE_SPINLOCK(tty_ldisc_lock);
474static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 641static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
475static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */ 642static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
476 643
644/**
645 * tty_register_ldisc - install a line discipline
646 * @disc: ldisc number
647 * @new_ldisc: pointer to the ldisc object
648 *
649 * Installs a new line discipline into the kernel. The discipline
650 * is set up as unreferenced and then made available to the kernel
651 * from this point onwards.
652 *
653 * Locking:
654 * takes tty_ldisc_lock to guard against ldisc races
655 */
656
477int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) 657int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
478{ 658{
479 unsigned long flags; 659 unsigned long flags;
@@ -493,6 +673,18 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
493} 673}
494EXPORT_SYMBOL(tty_register_ldisc); 674EXPORT_SYMBOL(tty_register_ldisc);
495 675
676/**
677 * tty_unregister_ldisc - unload a line discipline
678 * @disc: ldisc number
679 * @new_ldisc: pointer to the ldisc object
680 *
681 * Remove a line discipline from the kernel providing it is not
682 * currently in use.
683 *
684 * Locking:
685 * takes tty_ldisc_lock to guard against ldisc races
686 */
687
496int tty_unregister_ldisc(int disc) 688int tty_unregister_ldisc(int disc)
497{ 689{
498 unsigned long flags; 690 unsigned long flags;
@@ -512,6 +704,19 @@ int tty_unregister_ldisc(int disc)
512} 704}
513EXPORT_SYMBOL(tty_unregister_ldisc); 705EXPORT_SYMBOL(tty_unregister_ldisc);
514 706
707/**
708 * tty_ldisc_get - take a reference to an ldisc
709 * @disc: ldisc number
710 *
711 * Takes a reference to a line discipline. Deals with refcounts and
712 * module locking counts. Returns NULL if the discipline is not available.
713 * Returns a pointer to the discipline and bumps the ref count if it is
714 * available
715 *
716 * Locking:
717 * takes tty_ldisc_lock to guard against ldisc races
718 */
719
515struct tty_ldisc *tty_ldisc_get(int disc) 720struct tty_ldisc *tty_ldisc_get(int disc)
516{ 721{
517 unsigned long flags; 722 unsigned long flags;
@@ -540,6 +745,17 @@ struct tty_ldisc *tty_ldisc_get(int disc)
540 745
541EXPORT_SYMBOL_GPL(tty_ldisc_get); 746EXPORT_SYMBOL_GPL(tty_ldisc_get);
542 747
748/**
749 * tty_ldisc_put - drop ldisc reference
750 * @disc: ldisc number
751 *
752 * Drop a reference to a line discipline. Manage refcounts and
753 * module usage counts
754 *
755 * Locking:
756 * takes tty_ldisc_lock to guard against ldisc races
757 */
758
543void tty_ldisc_put(int disc) 759void tty_ldisc_put(int disc)
544{ 760{
545 struct tty_ldisc *ld; 761 struct tty_ldisc *ld;
@@ -557,6 +773,19 @@ void tty_ldisc_put(int disc)
557 773
558EXPORT_SYMBOL_GPL(tty_ldisc_put); 774EXPORT_SYMBOL_GPL(tty_ldisc_put);
559 775
776/**
777 * tty_ldisc_assign - set ldisc on a tty
778 * @tty: tty to assign
779 * @ld: line discipline
780 *
781 * Install an instance of a line discipline into a tty structure. The
782 * ldisc must have a reference count above zero to ensure it remains/
783 * The tty instance refcount starts at zero.
784 *
785 * Locking:
786 * Caller must hold references
787 */
788
560static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) 789static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
561{ 790{
562 tty->ldisc = *ld; 791 tty->ldisc = *ld;
@@ -571,6 +800,8 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
571 * the tty ldisc. Return 0 on failure or 1 on success. This is 800 * the tty ldisc. Return 0 on failure or 1 on success. This is
572 * used to implement both the waiting and non waiting versions 801 * used to implement both the waiting and non waiting versions
573 * of tty_ldisc_ref 802 * of tty_ldisc_ref
803 *
804 * Locking: takes tty_ldisc_lock
574 */ 805 */
575 806
576static int tty_ldisc_try(struct tty_struct *tty) 807static int tty_ldisc_try(struct tty_struct *tty)
@@ -602,6 +833,8 @@ static int tty_ldisc_try(struct tty_struct *tty)
602 * must also be careful not to hold other locks that will deadlock 833 * must also be careful not to hold other locks that will deadlock
603 * against a discipline change, such as an existing ldisc reference 834 * against a discipline change, such as an existing ldisc reference
604 * (which we check for) 835 * (which we check for)
836 *
837 * Locking: call functions take tty_ldisc_lock
605 */ 838 */
606 839
607struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 840struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
@@ -622,6 +855,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
622 * Dereference the line discipline for the terminal and take a 855 * Dereference the line discipline for the terminal and take a
623 * reference to it. If the line discipline is in flux then 856 * reference to it. If the line discipline is in flux then
624 * return NULL. Can be called from IRQ and timer functions. 857 * return NULL. Can be called from IRQ and timer functions.
858 *
859 * Locking: called functions take tty_ldisc_lock
625 */ 860 */
626 861
627struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) 862struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
@@ -639,6 +874,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref);
639 * 874 *
640 * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May 875 * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
641 * be called in IRQ context. 876 * be called in IRQ context.
877 *
878 * Locking: takes tty_ldisc_lock
642 */ 879 */
643 880
644void tty_ldisc_deref(struct tty_ldisc *ld) 881void tty_ldisc_deref(struct tty_ldisc *ld)
@@ -683,6 +920,9 @@ static void tty_ldisc_enable(struct tty_struct *tty)
683 * 920 *
684 * Set the discipline of a tty line. Must be called from a process 921 * Set the discipline of a tty line. Must be called from a process
685 * context. 922 * context.
923 *
924 * Locking: takes tty_ldisc_lock.
925 * called functions take termios_sem
686 */ 926 */
687 927
688static int tty_set_ldisc(struct tty_struct *tty, int ldisc) 928static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
@@ -846,9 +1086,17 @@ restart:
846 return retval; 1086 return retval;
847} 1087}
848 1088
849/* 1089/**
850 * This routine returns a tty driver structure, given a device number 1090 * get_tty_driver - find device of a tty
1091 * @dev_t: device identifier
1092 * @index: returns the index of the tty
1093 *
1094 * This routine returns a tty driver structure, given a device number
1095 * and also passes back the index number.
1096 *
1097 * Locking: caller must hold tty_mutex
851 */ 1098 */
1099
852static struct tty_driver *get_tty_driver(dev_t device, int *index) 1100static struct tty_driver *get_tty_driver(dev_t device, int *index)
853{ 1101{
854 struct tty_driver *p; 1102 struct tty_driver *p;
@@ -863,11 +1111,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
863 return NULL; 1111 return NULL;
864} 1112}
865 1113
866/* 1114/**
867 * If we try to write to, or set the state of, a terminal and we're 1115 * tty_check_change - check for POSIX terminal changes
868 * not in the foreground, send a SIGTTOU. If the signal is blocked or 1116 * @tty: tty to check
869 * ignored, go ahead and perform the operation. (POSIX 7.2) 1117 *
1118 * If we try to write to, or set the state of, a terminal and we're
1119 * not in the foreground, send a SIGTTOU. If the signal is blocked or
1120 * ignored, go ahead and perform the operation. (POSIX 7.2)
1121 *
1122 * Locking: none
870 */ 1123 */
1124
871int tty_check_change(struct tty_struct * tty) 1125int tty_check_change(struct tty_struct * tty)
872{ 1126{
873 if (current->signal->tty != tty) 1127 if (current->signal->tty != tty)
@@ -1005,10 +1259,27 @@ void tty_ldisc_flush(struct tty_struct *tty)
1005 1259
1006EXPORT_SYMBOL_GPL(tty_ldisc_flush); 1260EXPORT_SYMBOL_GPL(tty_ldisc_flush);
1007 1261
1008/* 1262/**
1009 * This can be called by the "eventd" kernel thread. That is process synchronous, 1263 * do_tty_hangup - actual handler for hangup events
1010 * but doesn't hold any locks, so we need to make sure we have the appropriate 1264 * @data: tty device
1011 * locks for what we're doing.. 1265 *
1266 * This can be called by the "eventd" kernel thread. That is process
1267 * synchronous but doesn't hold any locks, so we need to make sure we
1268 * have the appropriate locks for what we're doing.
1269 *
1270 * The hangup event clears any pending redirections onto the hung up
1271 * device. It ensures future writes will error and it does the needed
1272 * line discipline hangup and signal delivery. The tty object itself
1273 * remains intact.
1274 *
1275 * Locking:
1276 * BKL
1277 * redirect lock for undoing redirection
1278 * file list lock for manipulating list of ttys
1279 * tty_ldisc_lock from called functions
1280 * termios_sem resetting termios data
1281 * tasklist_lock to walk task list for hangup event
1282 *
1012 */ 1283 */
1013static void do_tty_hangup(void *data) 1284static void do_tty_hangup(void *data)
1014{ 1285{
@@ -1133,6 +1404,14 @@ static void do_tty_hangup(void *data)
1133 fput(f); 1404 fput(f);
1134} 1405}
1135 1406
1407/**
1408 * tty_hangup - trigger a hangup event
1409 * @tty: tty to hangup
1410 *
1411 * A carrier loss (virtual or otherwise) has occurred on this like
1412 * schedule a hangup sequence to run after this event.
1413 */
1414
1136void tty_hangup(struct tty_struct * tty) 1415void tty_hangup(struct tty_struct * tty)
1137{ 1416{
1138#ifdef TTY_DEBUG_HANGUP 1417#ifdef TTY_DEBUG_HANGUP
@@ -1145,6 +1424,15 @@ void tty_hangup(struct tty_struct * tty)
1145 1424
1146EXPORT_SYMBOL(tty_hangup); 1425EXPORT_SYMBOL(tty_hangup);
1147 1426
1427/**
1428 * tty_vhangup - process vhangup
1429 * @tty: tty to hangup
1430 *
1431 * The user has asked via system call for the terminal to be hung up.
1432 * We do this synchronously so that when the syscall returns the process
1433 * is complete. That guarantee is neccessary for security reasons.
1434 */
1435
1148void tty_vhangup(struct tty_struct * tty) 1436void tty_vhangup(struct tty_struct * tty)
1149{ 1437{
1150#ifdef TTY_DEBUG_HANGUP 1438#ifdef TTY_DEBUG_HANGUP
@@ -1156,6 +1444,14 @@ void tty_vhangup(struct tty_struct * tty)
1156} 1444}
1157EXPORT_SYMBOL(tty_vhangup); 1445EXPORT_SYMBOL(tty_vhangup);
1158 1446
1447/**
1448 * tty_hung_up_p - was tty hung up
1449 * @filp: file pointer of tty
1450 *
1451 * Return true if the tty has been subject to a vhangup or a carrier
1452 * loss
1453 */
1454
1159int tty_hung_up_p(struct file * filp) 1455int tty_hung_up_p(struct file * filp)
1160{ 1456{
1161 return (filp->f_op == &hung_up_tty_fops); 1457 return (filp->f_op == &hung_up_tty_fops);
@@ -1163,19 +1459,28 @@ int tty_hung_up_p(struct file * filp)
1163 1459
1164EXPORT_SYMBOL(tty_hung_up_p); 1460EXPORT_SYMBOL(tty_hung_up_p);
1165 1461
1166/* 1462/**
1167 * This function is typically called only by the session leader, when 1463 * disassociate_ctty - disconnect controlling tty
 1167 * it wants to disassociate itself from its controlling tty. 1464 * @on_exit: true if exiting, so we need to "hang up" the session
1465 *
1466 * This function is typically called only by the session leader, when
1467 * it wants to disassociate itself from its controlling tty.
1169 * 1468 *
1170 * It performs the following functions: 1469 * It performs the following functions:
1171 * (1) Sends a SIGHUP and SIGCONT to the foreground process group 1470 * (1) Sends a SIGHUP and SIGCONT to the foreground process group
1172 * (2) Clears the tty from being controlling the session 1471 * (2) Clears the tty from being controlling the session
1173 * (3) Clears the controlling tty for all processes in the 1472 * (3) Clears the controlling tty for all processes in the
1174 * session group. 1473 * session group.
1175 * 1474 *
1176 * The argument on_exit is set to 1 if called when a process is 1475 * The argument on_exit is set to 1 if called when a process is
1177 * exiting; it is 0 if called by the ioctl TIOCNOTTY. 1476 * exiting; it is 0 if called by the ioctl TIOCNOTTY.
1477 *
1478 * Locking: tty_mutex is taken to protect current->signal->tty
1479 * BKL is taken for hysterical raisins
1480 * Tasklist lock is taken (under tty_mutex) to walk process
1481 * lists for the session.
1178 */ 1482 */
1483
1179void disassociate_ctty(int on_exit) 1484void disassociate_ctty(int on_exit)
1180{ 1485{
1181 struct tty_struct *tty; 1486 struct tty_struct *tty;
@@ -1222,6 +1527,25 @@ void disassociate_ctty(int on_exit)
1222 unlock_kernel(); 1527 unlock_kernel();
1223} 1528}
1224 1529
1530
1531/**
 1532 * stop_tty - propagate flow control
1533 * @tty: tty to stop
1534 *
1535 * Perform flow control to the driver. For PTY/TTY pairs we
 1536 * must also propagate the TIOCPKT status. May be called
1537 * on an already stopped device and will not re-call the driver
1538 * method.
1539 *
1540 * This functionality is used by both the line disciplines for
1541 * halting incoming flow and by the driver. It may therefore be
1542 * called from any context, may be under the tty atomic_write_lock
1543 * but not always.
1544 *
1545 * Locking:
1546 * Broken. Relies on BKL which is unsafe here.
1547 */
1548
1225void stop_tty(struct tty_struct *tty) 1549void stop_tty(struct tty_struct *tty)
1226{ 1550{
1227 if (tty->stopped) 1551 if (tty->stopped)
@@ -1238,6 +1562,19 @@ void stop_tty(struct tty_struct *tty)
1238 1562
1239EXPORT_SYMBOL(stop_tty); 1563EXPORT_SYMBOL(stop_tty);
1240 1564
1565/**
 1566 * start_tty - propagate flow control
1567 * @tty: tty to start
1568 *
1569 * Start a tty that has been stopped if at all possible. Perform
 1570 * any necessary wakeups and propagate the TIOCPKT status. If the
 1571 * tty was previously stopped and is being started then the
 1572 * driver start method is invoked and the line discipline woken.
1573 *
1574 * Locking:
1575 * Broken. Relies on BKL which is unsafe here.
1576 */
1577
1241void start_tty(struct tty_struct *tty) 1578void start_tty(struct tty_struct *tty)
1242{ 1579{
1243 if (!tty->stopped || tty->flow_stopped) 1580 if (!tty->stopped || tty->flow_stopped)
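A small sketch of the flow-control usage described above, roughly as a line discipline might do it for software (XON/XOFF) flow control; my_receive_flow_char is a hypothetical helper, STOP_CHAR()/START_CHAR() are the real termios accessors:

	static void my_receive_flow_char(struct tty_struct *tty, unsigned char c)
	{
		if (c == STOP_CHAR(tty))
			stop_tty(tty);		/* idempotent: driver stop() runs once */
		else if (c == START_CHAR(tty))
			start_tty(tty);		/* restarts the driver and wakes writers */
	}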
@@ -1258,6 +1595,23 @@ void start_tty(struct tty_struct *tty)
1258 1595
1259EXPORT_SYMBOL(start_tty); 1596EXPORT_SYMBOL(start_tty);
1260 1597
1598/**
1599 * tty_read - read method for tty device files
1600 * @file: pointer to tty file
1601 * @buf: user buffer
1602 * @count: size of user buffer
1603 * @ppos: unused
1604 *
1605 * Perform the read system call function on this terminal device. Checks
1606 * for hung up devices before calling the line discipline method.
1607 *
1608 * Locking:
1609 * Locks the line discipline internally while needed
1610 * For historical reasons the line discipline read method is
1611 * invoked under the BKL. This will go away in time so do not rely on it
1612 * in new code. Multiple read calls may be outstanding in parallel.
1613 */
1614
1261static ssize_t tty_read(struct file * file, char __user * buf, size_t count, 1615static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
1262 loff_t *ppos) 1616 loff_t *ppos)
1263{ 1617{
@@ -1302,6 +1656,7 @@ static inline ssize_t do_tty_write(
1302 ssize_t ret = 0, written = 0; 1656 ssize_t ret = 0, written = 0;
1303 unsigned int chunk; 1657 unsigned int chunk;
1304 1658
1659 /* FIXME: O_NDELAY ... */
1305 if (mutex_lock_interruptible(&tty->atomic_write_lock)) { 1660 if (mutex_lock_interruptible(&tty->atomic_write_lock)) {
1306 return -ERESTARTSYS; 1661 return -ERESTARTSYS;
1307 } 1662 }
@@ -1318,6 +1673,9 @@ static inline ssize_t do_tty_write(
1318 * layer has problems with bigger chunks. It will 1673 * layer has problems with bigger chunks. It will
1319 * claim to be able to handle more characters than 1674 * claim to be able to handle more characters than
1320 * it actually does. 1675 * it actually does.
1676 *
1677 * FIXME: This can probably go away now except that 64K chunks
1678 * are too likely to fail unless switched to vmalloc...
1321 */ 1679 */
1322 chunk = 2048; 1680 chunk = 2048;
1323 if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags)) 1681 if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
@@ -1375,6 +1733,24 @@ static inline ssize_t do_tty_write(
1375} 1733}
1376 1734
1377 1735
1736/**
1737 * tty_write - write method for tty device file
1738 * @file: tty file pointer
1739 * @buf: user data to write
1740 * @count: bytes to write
1741 * @ppos: unused
1742 *
1743 * Write data to a tty device via the line discipline.
1744 *
1745 * Locking:
1746 * Locks the line discipline as required
1747 * Writes to the tty driver are serialized by the atomic_write_lock
1748 * and are then processed in chunks to the device. The line discipline
 1749 * write method will not be invoked in parallel for each device.
1750 * The line discipline write method is called under the big
1751 * kernel lock for historical reasons. New code should not rely on this.
1752 */
1753
1378static ssize_t tty_write(struct file * file, const char __user * buf, size_t count, 1754static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
1379 loff_t *ppos) 1755 loff_t *ppos)
1380{ 1756{
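Because writes are chunked and may be interrupted, a write() on a tty can still return a short count to user space. A hedged userspace sketch of the usual retry loop (the function name write_all is made up):

	#include <unistd.h>
	#include <errno.h>

	/* Userspace: keep writing until all bytes are out or a hard error. */
	static ssize_t write_all(int fd, const char *buf, size_t len)
	{
		size_t done = 0;
		while (done < len) {
			ssize_t n = write(fd, buf + done, len - done);
			if (n < 0) {
				if (errno == EINTR)
					continue;	/* interrupted, retry */
				return -1;		/* e.g. EIO after a hangup */
			}
			done += n;
		}
		return done;
	}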
@@ -1422,7 +1798,18 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t
1422 1798
1423static char ptychar[] = "pqrstuvwxyzabcde"; 1799static char ptychar[] = "pqrstuvwxyzabcde";
1424 1800
1425static inline void pty_line_name(struct tty_driver *driver, int index, char *p) 1801/**
1802 * pty_line_name - generate name for a pty
1803 * @driver: the tty driver in use
1804 * @index: the minor number
1805 * @p: output buffer of at least 6 bytes
1806 *
1807 * Generate a name from a driver reference and write it to the output
1808 * buffer.
1809 *
1810 * Locking: None
1811 */
1812static void pty_line_name(struct tty_driver *driver, int index, char *p)
1426{ 1813{
1427 int i = index + driver->name_base; 1814 int i = index + driver->name_base;
1428 /* ->name is initialized to "ttyp", but "tty" is expected */ 1815 /* ->name is initialized to "ttyp", but "tty" is expected */
@@ -1431,24 +1818,53 @@ static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
1431 ptychar[i >> 4 & 0xf], i & 0xf); 1818 ptychar[i >> 4 & 0xf], i & 0xf);
1432} 1819}
1433 1820
1434static inline void tty_line_name(struct tty_driver *driver, int index, char *p) 1821/**
 1822 * tty_line_name - generate name for a tty
1823 * @driver: the tty driver in use
1824 * @index: the minor number
1825 * @p: output buffer of at least 7 bytes
1826 *
1827 * Generate a name from a driver reference and write it to the output
1828 * buffer.
1829 *
1830 * Locking: None
1831 */
1832static void tty_line_name(struct tty_driver *driver, int index, char *p)
1435{ 1833{
1436 sprintf(p, "%s%d", driver->name, index + driver->name_base); 1834 sprintf(p, "%s%d", driver->name, index + driver->name_base);
1437} 1835}
1438 1836
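For illustration, the names these helpers produce. The values below assume a driver named "ttyS" with name_base 0 and a legacy pty slave driver; ttyS_driver and pty_slave_driver are hypothetical variables and the buffer sizes follow the kerneldoc above:

	char name[8];

	tty_line_name(ttyS_driver, 3, name);		/* -> "ttyS3" */
	pty_line_name(pty_slave_driver, 3, name);	/* -> "ttyp3": 'p' from ptychar, 3 in hex */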
1439/* 1837/**
1838 * init_dev - initialise a tty device
1839 * @driver: tty driver we are opening a device on
1840 * @idx: device index
1841 * @tty: returned tty structure
1842 *
1843 * Prepare a tty device. This may not be a "new" clean device but
1844 * could also be an active device. The pty drivers require special
1845 * handling because of this.
1846 *
1847 * Locking:
1848 * The function is called under the tty_mutex, which
1849 * protects us from the tty struct or driver itself going away.
1850 *
1851 * On exit the tty device has the line discipline attached and
1852 * a reference count of 1. If a pair was created for pty/tty use
1853 * and the other was a pty master then it too has a reference count of 1.
1854 *
1440 * WSH 06/09/97: Rewritten to remove races and properly clean up after a 1855 * WSH 06/09/97: Rewritten to remove races and properly clean up after a
1441 * failed open. The new code protects the open with a mutex, so it's 1856 * failed open. The new code protects the open with a mutex, so it's
1442 * really quite straightforward. The mutex locking can probably be 1857 * really quite straightforward. The mutex locking can probably be
1443 * relaxed for the (most common) case of reopening a tty. 1858 * relaxed for the (most common) case of reopening a tty.
1444 */ 1859 */
1860
1445static int init_dev(struct tty_driver *driver, int idx, 1861static int init_dev(struct tty_driver *driver, int idx,
1446 struct tty_struct **ret_tty) 1862 struct tty_struct **ret_tty)
1447{ 1863{
1448 struct tty_struct *tty, *o_tty; 1864 struct tty_struct *tty, *o_tty;
1449 struct termios *tp, **tp_loc, *o_tp, **o_tp_loc; 1865 struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
1450 struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc; 1866 struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
1451 int retval=0; 1867 int retval = 0;
1452 1868
1453 /* check whether we're reopening an existing tty */ 1869 /* check whether we're reopening an existing tty */
1454 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) { 1870 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
@@ -1662,10 +2078,20 @@ release_mem_out:
1662 goto end_init; 2078 goto end_init;
1663} 2079}
1664 2080
1665/* 2081/**
1666 * Releases memory associated with a tty structure, and clears out the 2082 * release_mem - release tty structure memory
1667 * driver table slots. 2083 *
2084 * Releases memory associated with a tty structure, and clears out the
2085 * driver table slots. This function is called when a device is no longer
2086 * in use. It also gets called when setup of a device fails.
2087 *
2088 * Locking:
2089 * tty_mutex - sometimes only
2090 * takes the file list lock internally when working on the list
2091 * of ttys that the driver keeps.
2092 * FIXME: should we require tty_mutex is held here ??
1668 */ 2093 */
2094
1669static void release_mem(struct tty_struct *tty, int idx) 2095static void release_mem(struct tty_struct *tty, int idx)
1670{ 2096{
1671 struct tty_struct *o_tty; 2097 struct tty_struct *o_tty;
@@ -2006,18 +2432,27 @@ static void release_dev(struct file * filp)
2006 2432
2007} 2433}
2008 2434
2009/* 2435/**
2010 * tty_open and tty_release keep up the tty count that contains the 2436 * tty_open - open a tty device
2011 * number of opens done on a tty. We cannot use the inode-count, as 2437 * @inode: inode of device file
2012 * different inodes might point to the same tty. 2438 * @filp: file pointer to tty
2439 *
2440 * tty_open and tty_release keep up the tty count that contains the
2441 * number of opens done on a tty. We cannot use the inode-count, as
2442 * different inodes might point to the same tty.
2013 * 2443 *
2014 * Open-counting is needed for pty masters, as well as for keeping 2444 * Open-counting is needed for pty masters, as well as for keeping
2015 * track of serial lines: DTR is dropped when the last close happens. 2445 * track of serial lines: DTR is dropped when the last close happens.
2016 * (This is not done solely through tty->count, now. - Ted 1/27/92) 2446 * (This is not done solely through tty->count, now. - Ted 1/27/92)
2017 * 2447 *
2018 * The termios state of a pty is reset on first open so that 2448 * The termios state of a pty is reset on first open so that
2019 * settings don't persist across reuse. 2449 * settings don't persist across reuse.
2450 *
2451 * Locking: tty_mutex protects current->signal->tty, get_tty_driver and
2452 * init_dev work. tty->count should protect the rest.
2453 * task_lock is held to update task details for sessions
2020 */ 2454 */
2455
2021static int tty_open(struct inode * inode, struct file * filp) 2456static int tty_open(struct inode * inode, struct file * filp)
2022{ 2457{
2023 struct tty_struct *tty; 2458 struct tty_struct *tty;
@@ -2132,6 +2567,18 @@ got_driver:
2132} 2567}
2133 2568
2134#ifdef CONFIG_UNIX98_PTYS 2569#ifdef CONFIG_UNIX98_PTYS
2570/**
2571 * ptmx_open - open a unix 98 pty master
2572 * @inode: inode of device file
2573 * @filp: file pointer to tty
2574 *
2575 * Allocate a unix98 pty master device from the ptmx driver.
2576 *
 2577 * Locking: tty_mutex protects the init_dev work. tty->count should
 2578 * protect the rest.
2579 * allocated_ptys_lock handles the list of free pty numbers
2580 */
2581
2135static int ptmx_open(struct inode * inode, struct file * filp) 2582static int ptmx_open(struct inode * inode, struct file * filp)
2136{ 2583{
2137 struct tty_struct *tty; 2584 struct tty_struct *tty;
@@ -2191,6 +2638,18 @@ out:
2191} 2638}
2192#endif 2639#endif
2193 2640
2641/**
2642 * tty_release - vfs callback for close
2643 * @inode: inode of tty
2644 * @filp: file pointer for handle to tty
2645 *
2646 * Called the last time each file handle is closed that references
2647 * this tty. There may however be several such references.
2648 *
2649 * Locking:
2650 * Takes bkl. See release_dev
2651 */
2652
2194static int tty_release(struct inode * inode, struct file * filp) 2653static int tty_release(struct inode * inode, struct file * filp)
2195{ 2654{
2196 lock_kernel(); 2655 lock_kernel();
@@ -2199,7 +2658,18 @@ static int tty_release(struct inode * inode, struct file * filp)
2199 return 0; 2658 return 0;
2200} 2659}
2201 2660
2202/* No kernel lock held - fine */ 2661/**
2662 * tty_poll - check tty status
2663 * @filp: file being polled
2664 * @wait: poll wait structures to update
2665 *
2666 * Call the line discipline polling method to obtain the poll
2667 * status of the device.
2668 *
 2669 * Locking: locks the called line discipline but the ldisc poll method
2670 * may be re-entered freely by other callers.
2671 */
2672
2203static unsigned int tty_poll(struct file * filp, poll_table * wait) 2673static unsigned int tty_poll(struct file * filp, poll_table * wait)
2204{ 2674{
2205 struct tty_struct * tty; 2675 struct tty_struct * tty;
@@ -2243,6 +2713,21 @@ static int tty_fasync(int fd, struct file * filp, int on)
2243 return 0; 2713 return 0;
2244} 2714}
2245 2715
2716/**
2717 * tiocsti - fake input character
2718 * @tty: tty to fake input into
2719 * @p: pointer to character
2720 *
 2721 * Fake input to a tty device. Does the necessary locking and
2722 * input management.
2723 *
2724 * FIXME: does not honour flow control ??
2725 *
2726 * Locking:
2727 * Called functions take tty_ldisc_lock
2728 * current->signal->tty check is safe without locks
2729 */
2730
2246static int tiocsti(struct tty_struct *tty, char __user *p) 2731static int tiocsti(struct tty_struct *tty, char __user *p)
2247{ 2732{
2248 char ch, mbz = 0; 2733 char ch, mbz = 0;
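From user space, TIOCSTI looks roughly like the sketch below; the kernel side enforces that the target is the caller's controlling tty unless CAP_SYS_ADMIN is held:

	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Userspace: push one character into the tty's input queue. */
	void fake_input(int fd, char c)
	{
		if (ioctl(fd, TIOCSTI, &c) < 0)
			perror("TIOCSTI");
	}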
@@ -2258,6 +2743,18 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2258 return 0; 2743 return 0;
2259} 2744}
2260 2745
2746/**
2747 * tiocgwinsz - implement window query ioctl
 2748 * @tty: tty
2749 * @arg: user buffer for result
2750 *
2751 * Copies the kernel idea of the window size into the user buffer. No
2752 * locking is done.
2753 *
2754 * FIXME: Returning random values racing a window size set is wrong
2755 * should lock here against that
2756 */
2757
2261static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) 2758static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
2262{ 2759{
2263 if (copy_to_user(arg, &tty->winsize, sizeof(*arg))) 2760 if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
@@ -2265,6 +2762,24 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
2265 return 0; 2762 return 0;
2266} 2763}
2267 2764
2765/**
2766 * tiocswinsz - implement window size set ioctl
 2767 * @tty: tty
2768 * @arg: user buffer for result
2769 *
2770 * Copies the user idea of the window size to the kernel. Traditionally
2771 * this is just advisory information but for the Linux console it
2772 * actually has driver level meaning and triggers a VC resize.
2773 *
2774 * Locking:
2775 * The console_sem is used to ensure we do not try and resize
2776 * the console twice at once.
2777 * FIXME: Two racing size sets may leave the console and kernel
2778 * parameters disagreeing. Is this exploitable ?
2779 * FIXME: Random values racing a window size get is wrong
2780 * should lock here against that
2781 */
2782
2268static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, 2783static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2269 struct winsize __user * arg) 2784 struct winsize __user * arg)
2270{ 2785{
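The two window-size ioctls pair up in user space roughly as below; a successful set is what triggers the SIGWINCH delivery and console resize behaviour described above:

	#include <sys/ioctl.h>

	/* Userspace: read the current size, then grow the window. */
	void grow_window(int fd)
	{
		struct winsize ws;

		if (ioctl(fd, TIOCGWINSZ, &ws) == 0) {
			ws.ws_row += 10;
			ws.ws_col += 20;
			ioctl(fd, TIOCSWINSZ, &ws);	/* SIGWINCH to the foreground pgrp */
		}
	}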
@@ -2294,6 +2809,15 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2294 return 0; 2809 return 0;
2295} 2810}
2296 2811
2812/**
2813 * tioccons - allow admin to move logical console
2814 * @file: the file to become console
2815 *
 2816 * Allow the administrator to move the redirected console device
2817 *
2818 * Locking: uses redirect_lock to guard the redirect information
2819 */
2820
2297static int tioccons(struct file *file) 2821static int tioccons(struct file *file)
2298{ 2822{
2299 if (!capable(CAP_SYS_ADMIN)) 2823 if (!capable(CAP_SYS_ADMIN))
@@ -2319,6 +2843,17 @@ static int tioccons(struct file *file)
2319 return 0; 2843 return 0;
2320} 2844}
2321 2845
2846/**
2847 * fionbio - non blocking ioctl
2848 * @file: file to set blocking value
2849 * @p: user parameter
2850 *
2851 * Historical tty interfaces had a blocking control ioctl before
2852 * the generic functionality existed. This piece of history is preserved
 2853 * in the expected tty API of POSIX OS's.
2854 *
 2855 * Locking: none, the open file handle ensures it won't go away.
2856 */
2322 2857
2323static int fionbio(struct file *file, int __user *p) 2858static int fionbio(struct file *file, int __user *p)
2324{ 2859{
@@ -2334,6 +2869,23 @@ static int fionbio(struct file *file, int __user *p)
2334 return 0; 2869 return 0;
2335} 2870}
2336 2871
2872/**
2873 * tiocsctty - set controlling tty
2874 * @tty: tty structure
2875 * @arg: user argument
2876 *
2877 * This ioctl is used to manage job control. It permits a session
2878 * leader to set this tty as the controlling tty for the session.
2879 *
2880 * Locking:
2881 * Takes tasklist lock internally to walk sessions
2882 * Takes task_lock() when updating signal->tty
2883 *
2884 * FIXME: tty_mutex is needed to protect signal->tty references.
2885 * FIXME: why task_lock on the signal->tty reference ??
2886 *
2887 */
2888
2337static int tiocsctty(struct tty_struct *tty, int arg) 2889static int tiocsctty(struct tty_struct *tty, int arg)
2338{ 2890{
2339 struct task_struct *p; 2891 struct task_struct *p;
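A hedged userspace sketch of the job-control sequence the comment describes: become a session leader, then adopt a tty as the controlling terminal. The device path is only an example:

	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>

	/* Userspace: acquire /dev/ttyS0 as the controlling tty. */
	int acquire_ctty(void)
	{
		int fd;

		if (setsid() < 0)		/* must become a session leader first */
			return -1;
		fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
		if (fd < 0)
			return -1;
		/* arg 0: fail if the tty already controls another session;
		 * arg 1 plus CAP_SYS_ADMIN may steal it. */
		if (ioctl(fd, TIOCSCTTY, 0) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}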
@@ -2374,6 +2926,18 @@ static int tiocsctty(struct tty_struct *tty, int arg)
2374 return 0; 2926 return 0;
2375} 2927}
2376 2928
2929/**
2930 * tiocgpgrp - get process group
2931 * @tty: tty passed by user
 2932 * @real_tty: tty side of the tty passed by the user if a pty else the tty
2933 * @p: returned pid
2934 *
2935 * Obtain the process group of the tty. If there is no process group
2936 * return an error.
2937 *
2938 * Locking: none. Reference to ->signal->tty is safe.
2939 */
2940
2377static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 2941static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2378{ 2942{
2379 /* 2943 /*
@@ -2385,6 +2949,20 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
2385 return put_user(real_tty->pgrp, p); 2949 return put_user(real_tty->pgrp, p);
2386} 2950}
2387 2951
2952/**
2953 * tiocspgrp - attempt to set process group
2954 * @tty: tty passed by user
2955 * @real_tty: tty side device matching tty passed by user
2956 * @p: pid pointer
2957 *
2958 * Set the process group of the tty to the session passed. Only
2959 * permitted where the tty session is our session.
2960 *
2961 * Locking: None
2962 *
2963 * FIXME: current->signal->tty referencing is unsafe.
2964 */
2965
2388static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 2966static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2389{ 2967{
2390 pid_t pgrp; 2968 pid_t pgrp;
@@ -2408,6 +2986,18 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
2408 return 0; 2986 return 0;
2409} 2987}
2410 2988
2989/**
2990 * tiocgsid - get session id
2991 * @tty: tty passed by user
 2992 * @real_tty: tty side of the tty passed by the user if a pty else the tty
2993 * @p: pointer to returned session id
2994 *
2995 * Obtain the session id of the tty. If there is no session
2996 * return an error.
2997 *
2998 * Locking: none. Reference to ->signal->tty is safe.
2999 */
3000
2411static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 3001static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2412{ 3002{
2413 /* 3003 /*
@@ -2421,6 +3011,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
2421 return put_user(real_tty->session, p); 3011 return put_user(real_tty->session, p);
2422} 3012}
2423 3013
3014/**
3015 * tiocsetd - set line discipline
3016 * @tty: tty device
3017 * @p: pointer to user data
3018 *
3019 * Set the line discipline according to user request.
3020 *
3021 * Locking: see tty_set_ldisc, this function is just a helper
3022 */
3023
2424static int tiocsetd(struct tty_struct *tty, int __user *p) 3024static int tiocsetd(struct tty_struct *tty, int __user *p)
2425{ 3025{
2426 int ldisc; 3026 int ldisc;
@@ -2430,6 +3030,21 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
2430 return tty_set_ldisc(tty, ldisc); 3030 return tty_set_ldisc(tty, ldisc);
2431} 3031}
2432 3032
3033/**
 3034 * send_break - perform a timed break
3035 * @tty: device to break on
 3036 * @duration: timeout in ms
3037 *
3038 * Perform a timed break on hardware that lacks its own driver level
3039 * timed break functionality.
3040 *
3041 * Locking:
3042 * None
3043 *
3044 * FIXME:
3045 * What if two overlap
3046 */
3047
2433static int send_break(struct tty_struct *tty, unsigned int duration) 3048static int send_break(struct tty_struct *tty, unsigned int duration)
2434{ 3049{
2435 tty->driver->break_ctl(tty, -1); 3050 tty->driver->break_ctl(tty, -1);
@@ -2442,8 +3057,19 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
2442 return 0; 3057 return 0;
2443} 3058}
2444 3059
2445static int 3060/**
2446tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) 3061 * tiocmget - get modem status
3062 * @tty: tty device
3063 * @file: user file pointer
3064 * @p: pointer to result
3065 *
3066 * Obtain the modem status bits from the tty driver if the feature
3067 * is supported. Return -EINVAL if it is not available.
3068 *
3069 * Locking: none (up to the driver)
3070 */
3071
3072static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
2447{ 3073{
2448 int retval = -EINVAL; 3074 int retval = -EINVAL;
2449 3075
@@ -2456,8 +3082,20 @@ tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
2456 return retval; 3082 return retval;
2457} 3083}
2458 3084
2459static int 3085/**
2460tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, 3086 * tiocmset - set modem status
3087 * @tty: tty device
3088 * @file: user file pointer
3089 * @cmd: command - clear bits, set bits or set all
3090 * @p: pointer to desired bits
3091 *
3092 * Set the modem status bits from the tty driver if the feature
3093 * is supported. Return -EINVAL if it is not available.
3094 *
3095 * Locking: none (up to the driver)
3096 */
3097
3098static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
2461 unsigned __user *p) 3099 unsigned __user *p)
2462{ 3100{
2463 int retval = -EINVAL; 3101 int retval = -EINVAL;
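The modem-status pair is used from user space roughly as below; it only works where the underlying driver provides tiocmget/tiocmset, otherwise EINVAL comes back as noted above:

	#include <sys/ioctl.h>

	/* Userspace: assert DTR and RTS on a serial port. */
	int raise_dtr_rts(int fd)
	{
		int bits;

		if (ioctl(fd, TIOCMGET, &bits) < 0)
			return -1;
		bits |= TIOCM_DTR | TIOCM_RTS;
		return ioctl(fd, TIOCMSET, &bits);	/* TIOCMBIS would set just these */
	}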
@@ -2573,6 +3211,7 @@ int tty_ioctl(struct inode * inode, struct file * file,
2573 clear_bit(TTY_EXCLUSIVE, &tty->flags); 3211 clear_bit(TTY_EXCLUSIVE, &tty->flags);
2574 return 0; 3212 return 0;
2575 case TIOCNOTTY: 3213 case TIOCNOTTY:
 3214 /* FIXME: task lock or tty_mutex ? */
2576 if (current->signal->tty != tty) 3215 if (current->signal->tty != tty)
2577 return -ENOTTY; 3216 return -ENOTTY;
2578 if (current->signal->leader) 3217 if (current->signal->leader)
@@ -2753,9 +3392,16 @@ void do_SAK(struct tty_struct *tty)
2753 3392
2754EXPORT_SYMBOL(do_SAK); 3393EXPORT_SYMBOL(do_SAK);
2755 3394
2756/* 3395/**
2757 * This routine is called out of the software interrupt to flush data 3396 * flush_to_ldisc
2758 * from the buffer chain to the line discipline. 3397 * @private_: tty structure passed from work queue.
3398 *
3399 * This routine is called out of the software interrupt to flush data
3400 * from the buffer chain to the line discipline.
3401 *
3402 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
3403 * while invoking the line discipline receive_buf method. The
3404 * receive_buf method is single threaded for each tty instance.
2759 */ 3405 */
2760 3406
2761static void flush_to_ldisc(void *private_) 3407static void flush_to_ldisc(void *private_)
@@ -2831,6 +3477,8 @@ static int n_baud_table = ARRAY_SIZE(baud_table);
2831 * Convert termios baud rate data into a speed. This should be called 3477 * Convert termios baud rate data into a speed. This should be called
2832 * with the termios lock held if this termios is a terminal termios 3478 * with the termios lock held if this termios is a terminal termios
2833 * structure. May change the termios data. 3479 * structure. May change the termios data.
3480 *
3481 * Locking: none
2834 */ 3482 */
2835 3483
2836int tty_termios_baud_rate(struct termios *termios) 3484int tty_termios_baud_rate(struct termios *termios)
@@ -2859,6 +3507,8 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
2859 * Returns the baud rate as an integer for this terminal. The 3507 * Returns the baud rate as an integer for this terminal. The
2860 * termios lock must be held by the caller and the terminal bit 3508 * termios lock must be held by the caller and the terminal bit
2861 * flags may be updated. 3509 * flags may be updated.
3510 *
3511 * Locking: none
2862 */ 3512 */
2863 3513
2864int tty_get_baud_rate(struct tty_struct *tty) 3514int tty_get_baud_rate(struct tty_struct *tty)
@@ -2888,6 +3538,8 @@ EXPORT_SYMBOL(tty_get_baud_rate);
2888 * 3538 *
2889 * In the event of the queue being busy for flipping the work will be 3539 * In the event of the queue being busy for flipping the work will be
2890 * held off and retried later. 3540 * held off and retried later.
3541 *
3542 * Locking: tty buffer lock. Driver locks in low latency mode.
2891 */ 3543 */
2892 3544
2893void tty_flip_buffer_push(struct tty_struct *tty) 3545void tty_flip_buffer_push(struct tty_struct *tty)
@@ -2907,9 +3559,16 @@ void tty_flip_buffer_push(struct tty_struct *tty)
2907EXPORT_SYMBOL(tty_flip_buffer_push); 3559EXPORT_SYMBOL(tty_flip_buffer_push);
2908 3560
2909 3561
2910/* 3562/**
2911 * This subroutine initializes a tty structure. 3563 * initialize_tty_struct
3564 * @tty: tty to initialize
3565 *
3566 * This subroutine initializes a tty structure that has been newly
3567 * allocated.
3568 *
3569 * Locking: none - tty in question must not be exposed at this point
2912 */ 3570 */
3571
2913static void initialize_tty_struct(struct tty_struct *tty) 3572static void initialize_tty_struct(struct tty_struct *tty)
2914{ 3573{
2915 memset(tty, 0, sizeof(struct tty_struct)); 3574 memset(tty, 0, sizeof(struct tty_struct));
@@ -2935,6 +3594,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
2935/* 3594/*
2936 * The default put_char routine if the driver did not define one. 3595 * The default put_char routine if the driver did not define one.
2937 */ 3596 */
3597
2938static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) 3598static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
2939{ 3599{
2940 tty->driver->write(tty, &ch, 1); 3600 tty->driver->write(tty, &ch, 1);
@@ -2943,19 +3603,23 @@ static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
2943static struct class *tty_class; 3603static struct class *tty_class;
2944 3604
2945/** 3605/**
2946 * tty_register_device - register a tty device 3606 * tty_register_device - register a tty device
2947 * @driver: the tty driver that describes the tty device 3607 * @driver: the tty driver that describes the tty device
2948 * @index: the index in the tty driver for this tty device 3608 * @index: the index in the tty driver for this tty device
2949 * @device: a struct device that is associated with this tty device. 3609 * @device: a struct device that is associated with this tty device.
2950 * This field is optional, if there is no known struct device for this 3610 * This field is optional, if there is no known struct device
2951 * tty device it can be set to NULL safely. 3611 * for this tty device it can be set to NULL safely.
2952 * 3612 *
2953 * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). 3613 * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
2954 * 3614 *
2955 * This call is required to be made to register an individual tty device if 3615 * This call is required to be made to register an individual tty device
2956 * the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If that 3616 * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
2957 * bit is not set, this function should not be called by a tty driver. 3617 * that bit is not set, this function should not be called by a tty
3618 * driver.
3619 *
3620 * Locking: ??
2958 */ 3621 */
3622
2959struct class_device *tty_register_device(struct tty_driver *driver, 3623struct class_device *tty_register_device(struct tty_driver *driver,
2960 unsigned index, struct device *device) 3624 unsigned index, struct device *device)
2961{ 3625{
@@ -2977,13 +3641,16 @@ struct class_device *tty_register_device(struct tty_driver *driver,
2977} 3641}
2978 3642
2979/** 3643/**
2980 * tty_unregister_device - unregister a tty device 3644 * tty_unregister_device - unregister a tty device
2981 * @driver: the tty driver that describes the tty device 3645 * @driver: the tty driver that describes the tty device
2982 * @index: the index in the tty driver for this tty device 3646 * @index: the index in the tty driver for this tty device
2983 * 3647 *
2984 * If a tty device is registered with a call to tty_register_device() then 3648 * If a tty device is registered with a call to tty_register_device() then
2985 * this function must be made when the tty device is gone. 3649 * this function must be called when the tty device is gone.
3650 *
3651 * Locking: ??
2986 */ 3652 */
3653
2987void tty_unregister_device(struct tty_driver *driver, unsigned index) 3654void tty_unregister_device(struct tty_driver *driver, unsigned index)
2988{ 3655{
2989 class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); 3656 class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
@@ -3094,7 +3761,6 @@ int tty_register_driver(struct tty_driver *driver)
3094 driver->cdev.owner = driver->owner; 3761 driver->cdev.owner = driver->owner;
3095 error = cdev_add(&driver->cdev, dev, driver->num); 3762 error = cdev_add(&driver->cdev, dev, driver->num);
3096 if (error) { 3763 if (error) {
3097 cdev_del(&driver->cdev);
3098 unregister_chrdev_region(dev, driver->num); 3764 unregister_chrdev_region(dev, driver->num);
3099 driver->ttys = NULL; 3765 driver->ttys = NULL;
3100 driver->termios = driver->termios_locked = NULL; 3766 driver->termios = driver->termios_locked = NULL;
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index f19cf9d7792d..4ad47d321bd4 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -36,6 +36,18 @@
36#define TERMIOS_WAIT 2 36#define TERMIOS_WAIT 2
37#define TERMIOS_TERMIO 4 37#define TERMIOS_TERMIO 4
38 38
39
40/**
41 * tty_wait_until_sent - wait for I/O to finish
42 * @tty: tty we are waiting for
43 * @timeout: how long we will wait
44 *
45 * Wait for characters pending in a tty driver to hit the wire, or
 46 * for a timeout to occur (e.g. due to flow control)
47 *
48 * Locking: none
49 */
50
39void tty_wait_until_sent(struct tty_struct * tty, long timeout) 51void tty_wait_until_sent(struct tty_struct * tty, long timeout)
40{ 52{
41 DECLARE_WAITQUEUE(wait, current); 53 DECLARE_WAITQUEUE(wait, current);
@@ -94,6 +106,18 @@ static void unset_locked_termios(struct termios *termios,
94 old->c_cc[i] : termios->c_cc[i]; 106 old->c_cc[i] : termios->c_cc[i];
95} 107}
96 108
109/**
110 * change_termios - update termios values
111 * @tty: tty to update
112 * @new_termios: desired new value
113 *
114 * Perform updates to the termios values set on this terminal. There
115 * is a bit of layering violation here with n_tty in terms of the
116 * internal knowledge of this function.
117 *
118 * Locking: termios_sem
119 */
120
97static void change_termios(struct tty_struct * tty, struct termios * new_termios) 121static void change_termios(struct tty_struct * tty, struct termios * new_termios)
98{ 122{
99 int canon_change; 123 int canon_change;
@@ -155,6 +179,19 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
155 up(&tty->termios_sem); 179 up(&tty->termios_sem);
156} 180}
157 181
182/**
183 * set_termios - set termios values for a tty
184 * @tty: terminal device
185 * @arg: user data
186 * @opt: option information
187 *
 188 * Helper function to prepare termios data and run necessary other
189 * functions before using change_termios to do the actual changes.
190 *
191 * Locking:
192 * Called functions take ldisc and termios_sem locks
193 */
194
158static int set_termios(struct tty_struct * tty, void __user *arg, int opt) 195static int set_termios(struct tty_struct * tty, void __user *arg, int opt)
159{ 196{
160 struct termios tmp_termios; 197 struct termios tmp_termios;
@@ -284,6 +321,17 @@ static void set_sgflags(struct termios * termios, int flags)
284 } 321 }
285} 322}
286 323
324/**
325 * set_sgttyb - set legacy terminal values
326 * @tty: tty structure
327 * @sgttyb: pointer to old style terminal structure
328 *
329 * Updates a terminal from the legacy BSD style terminal information
330 * structure.
331 *
332 * Locking: termios_sem
333 */
334
287static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) 335static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb)
288{ 336{
289 int retval; 337 int retval;
@@ -369,9 +417,16 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars)
369} 417}
370#endif 418#endif
371 419
372/* 420/**
373 * Send a high priority character to the tty. 421 * send_prio_char - send priority character
422 *
423 * Send a high priority character to the tty even if stopped
424 *
425 * Locking: none
426 *
427 * FIXME: overlapping calls with start/stop tty lose state of tty
374 */ 428 */
429
375static void send_prio_char(struct tty_struct *tty, char ch) 430static void send_prio_char(struct tty_struct *tty, char ch)
376{ 431{
377 int was_stopped = tty->stopped; 432 int was_stopped = tty->stopped;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index eccffaf26faa..a5628a8b6620 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -1011,6 +1011,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1011 return -EPERM; 1011 return -EPERM;
1012 vt_dont_switch = 0; 1012 vt_dont_switch = 0;
1013 return 0; 1013 return 0;
1014 case VT_GETHIFONTMASK:
1015 return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg);
1014 default: 1016 default:
1015 return -ENOIOCTLCMD; 1017 return -ENOIOCTLCMD;
1016 } 1018 }
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index cc15c4f2e9ec..35ad1b032726 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -26,6 +26,7 @@
26#include <linux/jiffies.h> 26#include <linux/jiffies.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/delay.h>
29#include <linux/platform_device.h> 30#include <linux/platform_device.h>
30#include <linux/hwmon.h> 31#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 32#include <linux/hwmon-sysfs.h>
@@ -64,17 +65,17 @@
64#define ABIT_UGURU_IN_SENSOR 0 65#define ABIT_UGURU_IN_SENSOR 0
65#define ABIT_UGURU_TEMP_SENSOR 1 66#define ABIT_UGURU_TEMP_SENSOR 1
66#define ABIT_UGURU_NC 2 67#define ABIT_UGURU_NC 2
67/* Timeouts / Retries, if these turn out to need a lot of fiddling we could 68/* In many cases we need to wait for the uGuru to reach a certain status, most
68 convert them to params. */ 69 of the time it will reach this status within 30 - 90 ISA reads, and thus we
69/* 250 was determined by trial and error, 200 works most of the time, but not 70 can best busy wait. This define gives the total amount of reads to try. */
70 always. I assume this is cpu-speed independent, since the ISA-bus and not 71#define ABIT_UGURU_WAIT_TIMEOUT 125
71 the CPU should be the bottleneck. Note that 250 sometimes is still not 72/* However sometimes older versions of the uGuru seem to be distracted and they
72 enough (only reported on AN7 mb) this is handled by a higher layer. */ 73 do not respond for a long time. To handle this we sleep before each of the
73#define ABIT_UGURU_WAIT_TIMEOUT 250 74 last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */
75#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5
74/* Normally all expected status in abituguru_ready, are reported after the 76/* Normally all expected status in abituguru_ready, are reported after the
75 first read, but sometimes not and we need to poll, 5 polls was not enough 77 first read, but sometimes not and we need to poll. */
76 50 sofar is. */ 78#define ABIT_UGURU_READY_TIMEOUT 5
77#define ABIT_UGURU_READY_TIMEOUT 50
78/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */ 79/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */
79#define ABIT_UGURU_MAX_RETRIES 3 80#define ABIT_UGURU_MAX_RETRIES 3
80#define ABIT_UGURU_RETRY_DELAY (HZ/5) 81#define ABIT_UGURU_RETRY_DELAY (HZ/5)
@@ -226,6 +227,10 @@ static int abituguru_wait(struct abituguru_data *data, u8 state)
226 timeout--; 227 timeout--;
227 if (timeout == 0) 228 if (timeout == 0)
228 return -EBUSY; 229 return -EBUSY;
230 /* sleep a bit before our last few tries, see the comment on
231 this where ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */
232 if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP)
233 msleep(0);
229 } 234 }
230 return 0; 235 return 0;
231} 236}
@@ -256,6 +261,7 @@ static int abituguru_ready(struct abituguru_data *data)
256 "CMD reg does not hold 0xAC after ready command\n"); 261 "CMD reg does not hold 0xAC after ready command\n");
257 return -EIO; 262 return -EIO;
258 } 263 }
264 msleep(0);
259 } 265 }
260 266
261 /* After this the ABIT_UGURU_DATA port should contain 267 /* After this the ABIT_UGURU_DATA port should contain
@@ -268,6 +274,7 @@ static int abituguru_ready(struct abituguru_data *data)
268 "state != more input after ready command\n"); 274 "state != more input after ready command\n");
269 return -EIO; 275 return -EIO;
270 } 276 }
277 msleep(0);
271 } 278 }
272 279
273 data->uguru_ready = 1; 280 data->uguru_ready = 1;
@@ -331,7 +338,8 @@ static int abituguru_read(struct abituguru_data *data,
331 /* And read the data */ 338 /* And read the data */
332 for (i = 0; i < count; i++) { 339 for (i = 0; i < count; i++) {
333 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { 340 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
334 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for " 341 ABIT_UGURU_DEBUG(retries ? 1 : 3,
342 "timeout exceeded waiting for "
335 "read state (bank: %d, sensor: %d)\n", 343 "read state (bank: %d, sensor: %d)\n",
336 (int)bank_addr, (int)sensor_addr); 344 (int)bank_addr, (int)sensor_addr);
337 break; 345 break;
@@ -350,7 +358,9 @@ static int abituguru_read(struct abituguru_data *data,
350static int abituguru_write(struct abituguru_data *data, 358static int abituguru_write(struct abituguru_data *data,
351 u8 bank_addr, u8 sensor_addr, u8 *buf, int count) 359 u8 bank_addr, u8 sensor_addr, u8 *buf, int count)
352{ 360{
353 int i; 361 /* We use the ready timeout as we have to wait for 0xAC just like the
362 ready function */
363 int i, timeout = ABIT_UGURU_READY_TIMEOUT;
354 364
355 /* Send the address */ 365 /* Send the address */
356 i = abituguru_send_address(data, bank_addr, sensor_addr, 366 i = abituguru_send_address(data, bank_addr, sensor_addr,
@@ -370,7 +380,8 @@ static int abituguru_write(struct abituguru_data *data,
370 } 380 }
371 381
372 /* Now we need to wait till the chip is ready to be read again, 382 /* Now we need to wait till the chip is ready to be read again,
373 don't ask why */ 383 so that we can read 0xAC as confirmation that our write has
384 succeeded. */
374 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { 385 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
375 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state " 386 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state "
376 "after write (bank: %d, sensor: %d)\n", (int)bank_addr, 387 "after write (bank: %d, sensor: %d)\n", (int)bank_addr,
@@ -379,11 +390,15 @@ static int abituguru_write(struct abituguru_data *data,
379 } 390 }
380 391
381 /* Cmd port MUST be read now and should contain 0xAC */ 392 /* Cmd port MUST be read now and should contain 0xAC */
382 if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { 393 while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
383 ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write " 394 timeout--;
384 "(bank: %d, sensor: %d)\n", (int)bank_addr, 395 if (timeout == 0) {
385 (int)sensor_addr); 396 ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after "
386 return -EIO; 397 "write (bank: %d, sensor: %d)\n",
398 (int)bank_addr, (int)sensor_addr);
399 return -EIO;
400 }
401 msleep(0);
387 } 402 }
388 403
389 /* Last put the chip back in ready state */ 404 /* Last put the chip back in ready state */
@@ -403,7 +418,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
403 u8 sensor_addr) 418 u8 sensor_addr)
404{ 419{
405 u8 val, buf[3]; 420 u8 val, buf[3];
 406 u8 val, buf[3]; 421 int i, ret = -ENODEV; /* error is the most commonly used retval :| */
407 422
408 /* If overriden by the user return the user selected type */ 423 /* If overriden by the user return the user selected type */
409 if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR && 424 if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR &&
@@ -439,7 +454,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
439 buf[2] = 250; 454 buf[2] = 250;
440 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 455 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
441 buf, 3) != 3) 456 buf, 3) != 3)
442 return -ENODEV; 457 goto abituguru_detect_bank1_sensor_type_exit;
443 /* Now we need 20 ms to give the uguru time to read the sensors 458 /* Now we need 20 ms to give the uguru time to read the sensors
444 and raise a voltage alarm */ 459 and raise a voltage alarm */
445 set_current_state(TASK_UNINTERRUPTIBLE); 460 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -447,21 +462,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
447 /* Check for alarm and check the alarm is a volt low alarm. */ 462 /* Check for alarm and check the alarm is a volt low alarm. */
448 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, 463 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
449 ABIT_UGURU_MAX_RETRIES) != 3) 464 ABIT_UGURU_MAX_RETRIES) != 3)
450 return -ENODEV; 465 goto abituguru_detect_bank1_sensor_type_exit;
451 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { 466 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
452 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, 467 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
453 sensor_addr, buf, 3, 468 sensor_addr, buf, 3,
454 ABIT_UGURU_MAX_RETRIES) != 3) 469 ABIT_UGURU_MAX_RETRIES) != 3)
455 return -ENODEV; 470 goto abituguru_detect_bank1_sensor_type_exit;
456 if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) { 471 if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
457 /* Restore original settings */
458 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
459 sensor_addr,
460 data->bank1_settings[sensor_addr],
461 3) != 3)
462 return -ENODEV;
463 ABIT_UGURU_DEBUG(2, " found volt sensor\n"); 472 ABIT_UGURU_DEBUG(2, " found volt sensor\n");
464 return ABIT_UGURU_IN_SENSOR; 473 ret = ABIT_UGURU_IN_SENSOR;
474 goto abituguru_detect_bank1_sensor_type_exit;
465 } else 475 } else
466 ABIT_UGURU_DEBUG(2, " alarm raised during volt " 476 ABIT_UGURU_DEBUG(2, " alarm raised during volt "
467 "sensor test, but volt low flag not set\n"); 477 "sensor test, but volt low flag not set\n");
@@ -477,7 +487,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
477 buf[2] = 10; 487 buf[2] = 10;
478 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 488 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
479 buf, 3) != 3) 489 buf, 3) != 3)
480 return -ENODEV; 490 goto abituguru_detect_bank1_sensor_type_exit;
481 /* Now we need 50 ms to give the uguru time to read the sensors 491 /* Now we need 50 ms to give the uguru time to read the sensors
482 and raise a temp alarm */ 492 and raise a temp alarm */
483 set_current_state(TASK_UNINTERRUPTIBLE); 493 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -485,15 +495,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
485 /* Check for alarm and check the alarm is a temp high alarm. */ 495 /* Check for alarm and check the alarm is a temp high alarm. */
486 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, 496 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
487 ABIT_UGURU_MAX_RETRIES) != 3) 497 ABIT_UGURU_MAX_RETRIES) != 3)
488 return -ENODEV; 498 goto abituguru_detect_bank1_sensor_type_exit;
489 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { 499 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
490 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, 500 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
491 sensor_addr, buf, 3, 501 sensor_addr, buf, 3,
492 ABIT_UGURU_MAX_RETRIES) != 3) 502 ABIT_UGURU_MAX_RETRIES) != 3)
493 return -ENODEV; 503 goto abituguru_detect_bank1_sensor_type_exit;
494 if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) { 504 if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) {
495 ret = ABIT_UGURU_TEMP_SENSOR;
496 ABIT_UGURU_DEBUG(2, " found temp sensor\n"); 505 ABIT_UGURU_DEBUG(2, " found temp sensor\n");
506 ret = ABIT_UGURU_TEMP_SENSOR;
507 goto abituguru_detect_bank1_sensor_type_exit;
497 } else 508 } else
498 ABIT_UGURU_DEBUG(2, " alarm raised during temp " 509 ABIT_UGURU_DEBUG(2, " alarm raised during temp "
499 "sensor test, but temp high flag not set\n"); 510 "sensor test, but temp high flag not set\n");
@@ -501,11 +512,23 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
501 ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor " 512 ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor "
502 "test\n"); 513 "test\n");
503 514
504 /* Restore original settings */ 515 ret = ABIT_UGURU_NC;
505 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 516abituguru_detect_bank1_sensor_type_exit:
506 data->bank1_settings[sensor_addr], 3) != 3) 517 /* Restore original settings, failing here is really BAD, it has been
518 reported that some BIOS-es hang when entering the uGuru menu with
519 invalid settings present in the uGuru, so we try this 3 times. */
520 for (i = 0; i < 3; i++)
521 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
522 sensor_addr, data->bank1_settings[sensor_addr],
523 3) == 3)
524 break;
525 if (i == 3) {
526 printk(KERN_ERR ABIT_UGURU_NAME
 527 ": Fatal error, could not restore original settings. "
 528 "This should never happen, please report this to the "
529 "abituguru maintainer (see MAINTAINERS)\n");
507 return -ENODEV; 530 return -ENODEV;
508 531 }
509 return ret; 532 return ret;
510} 533}
511 534
@@ -1305,7 +1328,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev)
1305 data->update_timeouts = 0; 1328 data->update_timeouts = 0;
1306LEAVE_UPDATE: 1329LEAVE_UPDATE:
1307 /* handle timeout condition */ 1330 /* handle timeout condition */
1308 if (err == -EBUSY) { 1331 if (!success && (err == -EBUSY || err >= 0)) {
1309 /* No overflow please */ 1332 /* No overflow please */
1310 if (data->update_timeouts < 255u) 1333 if (data->update_timeouts < 255u)
1311 data->update_timeouts++; 1334 data->update_timeouts++;
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index e7e27049fbfa..0be6fd6a267d 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -43,13 +43,12 @@
43/*-------------------------------------------------------------------------*/ 43/*-------------------------------------------------------------------------*/
44 44
45#define DRIVER_VERSION "2 May 2005" 45#define DRIVER_VERSION "2 May 2005"
46#define DRIVER_NAME (tps65010_driver.name) 46#define DRIVER_NAME (tps65010_driver.driver.name)
47 47
48MODULE_DESCRIPTION("TPS6501x Power Management Driver"); 48MODULE_DESCRIPTION("TPS6501x Power Management Driver");
49MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
50 50
51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END }; 51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END };
52static unsigned short normal_i2c_range[] = { I2C_CLIENT_END };
53 52
54I2C_CLIENT_INSMOD; 53I2C_CLIENT_INSMOD;
55 54
@@ -100,7 +99,7 @@ struct tps65010 {
100 /* not currently tracking GPIO state */ 99 /* not currently tracking GPIO state */
101}; 100};
102 101
103#define POWER_POLL_DELAY msecs_to_jiffies(800) 102#define POWER_POLL_DELAY msecs_to_jiffies(5000)
104 103
105/*-------------------------------------------------------------------------*/ 104/*-------------------------------------------------------------------------*/
106 105
@@ -520,8 +519,11 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
520 goto fail1; 519 goto fail1;
521 } 520 }
522 521
522 /* the IRQ is active low, but many gpio lines can't support that
523 * so this driver can use falling-edge triggers instead.
524 */
525 irqflags = IRQF_SAMPLE_RANDOM;
523#ifdef CONFIG_ARM 526#ifdef CONFIG_ARM
524 irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW;
525 if (machine_is_omap_h2()) { 527 if (machine_is_omap_h2()) {
526 tps->model = TPS65010; 528 tps->model = TPS65010;
527 omap_cfg_reg(W4_GPIO58); 529 omap_cfg_reg(W4_GPIO58);
@@ -543,8 +545,6 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
543 545
544 // FIXME set up this board's IRQ ... 546 // FIXME set up this board's IRQ ...
545 } 547 }
546#else
547 irqflags = IRQF_SAMPLE_RANDOM;
548#endif 548#endif
549 549
550 if (tps->irq > 0) { 550 if (tps->irq > 0) {
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d4bad6704bbe..448df2773377 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -3552,6 +3552,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
3552 3552
3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) 3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3554{ 3554{
3555 pci_save_state(pdev);
3556
3555#ifdef CONFIG_PPC_PMAC 3557#ifdef CONFIG_PPC_PMAC
3556 if (machine_is(powermac)) { 3558 if (machine_is(powermac)) {
3557 struct device_node *of_node; 3559 struct device_node *of_node;
@@ -3563,8 +3565,6 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3563 } 3565 }
3564#endif 3566#endif
3565 3567
3566 pci_save_state(pdev);
3567
3568 return 0; 3568 return 0;
3569} 3569}
3570 3570
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index e05ca2cdc73f..75313ade2e0d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
301 event->event == IB_EVENT_PORT_ACTIVE || 301 event->event == IB_EVENT_PORT_ACTIVE ||
302 event->event == IB_EVENT_LID_CHANGE || 302 event->event == IB_EVENT_LID_CHANGE ||
303 event->event == IB_EVENT_PKEY_CHANGE || 303 event->event == IB_EVENT_PKEY_CHANGE ||
304 event->event == IB_EVENT_SM_CHANGE) { 304 event->event == IB_EVENT_SM_CHANGE ||
305 event->event == IB_EVENT_CLIENT_REREGISTER) {
305 work = kmalloc(sizeof *work, GFP_ATOMIC); 306 work = kmalloc(sizeof *work, GFP_ATOMIC);
306 if (work) { 307 if (work) {
307 INIT_WORK(&work->work, ib_cache_task, work); 308 INIT_WORK(&work->work, ib_cache_task, work);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index aeda484ffd82..d6b84226bba7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
405 event->event == IB_EVENT_PORT_ACTIVE || 405 event->event == IB_EVENT_PORT_ACTIVE ||
406 event->event == IB_EVENT_LID_CHANGE || 406 event->event == IB_EVENT_LID_CHANGE ||
407 event->event == IB_EVENT_PKEY_CHANGE || 407 event->event == IB_EVENT_PKEY_CHANGE ||
408 event->event == IB_EVENT_SM_CHANGE) { 408 event->event == IB_EVENT_SM_CHANGE ||
409 event->event == IB_EVENT_CLIENT_REREGISTER) {
409 struct ib_sa_device *sa_dev; 410 struct ib_sa_device *sa_dev;
410 sa_dev = container_of(handler, typeof(*sa_dev), event_handler); 411 sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
411 412
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 557cde3a4563..7b82c1907f04 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -967,12 +967,12 @@ static struct {
967} mthca_hca_table[] = { 967} mthca_hca_table[] = {
968 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), 968 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
969 .flags = 0 }, 969 .flags = 0 },
970 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400), 970 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
971 .flags = MTHCA_FLAG_PCIE }, 971 .flags = MTHCA_FLAG_PCIE },
972 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), 972 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
973 .flags = MTHCA_FLAG_MEMFREE | 973 .flags = MTHCA_FLAG_MEMFREE |
974 MTHCA_FLAG_PCIE }, 974 MTHCA_FLAG_PCIE },
975 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800), 975 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
976 .flags = MTHCA_FLAG_MEMFREE | 976 .flags = MTHCA_FLAG_MEMFREE |
977 MTHCA_FLAG_PCIE | 977 MTHCA_FLAG_PCIE |
978 MTHCA_FLAG_SINAI_OPT } 978 MTHCA_FLAG_SINAI_OPT }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 230ae21db8fd..265b1d1c4a62 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev)
1287 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 1287 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
1291 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1292 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1293 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1294 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1295 dev->ib_dev.node_type = IB_NODE_CA; 1291 dev->ib_dev.node_type = IB_NODE_CA;
1296 dev->ib_dev.phys_port_cnt = dev->limits.num_ports; 1292 dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
1297 dev->ib_dev.dma_device = &dev->pdev->dev; 1293 dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev)
1316 dev->ib_dev.modify_srq = mthca_modify_srq; 1312 dev->ib_dev.modify_srq = mthca_modify_srq;
1317 dev->ib_dev.query_srq = mthca_query_srq; 1313 dev->ib_dev.query_srq = mthca_query_srq;
1318 dev->ib_dev.destroy_srq = mthca_destroy_srq; 1314 dev->ib_dev.destroy_srq = mthca_destroy_srq;
1315 dev->ib_dev.uverbs_cmd_mask |=
1316 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1317 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1318 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1319 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1319 1320
1320 if (mthca_is_memfree(dev)) 1321 if (mthca_is_memfree(dev))
1321 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; 1322 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
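
The provider change moves the four SRQ uverbs command bits out of the unconditional mask and ORs them in next to the SRQ verb pointers, so userspace is only told about SRQ commands when the SRQ entry points are actually installed (presumably inside the SRQ-capability branch, which the hunk does not show). A minimal, hypothetical sketch of that "advertise only what you register" pattern:

/* Illustrative only: set a capability bit only after the handler backing it
 * has been installed, mirroring the |= of the SRQ command bits above. */
#include <stdint.h>
#include <stdio.h>

#define CMD_CREATE_SRQ  0
#define CMD_DESTROY_SRQ 1

struct dev_caps {
    uint64_t cmd_mask;
    int (*create_srq)(void);
};

static int create_srq_impl(void) { return 0; }

static void register_device(struct dev_caps *d, int srq_supported)
{
    d->cmd_mask = 0;                    /* base commands would go here */
    if (srq_supported) {
        d->create_srq = create_srq_impl;
        d->cmd_mask |= (1ull << CMD_CREATE_SRQ) |
                       (1ull << CMD_DESTROY_SRQ);
    }
}

int main(void)
{
    struct dev_caps d;
    register_device(&d, 1);
    printf("mask=%llx\n", (unsigned long long)d.cmd_mask);
    return 0;
}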
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 8de2887ba15c..9a5bece3fa5c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -136,8 +136,8 @@ struct mthca_ah {
136 * We have one global lock that protects dev->cq/qp_table. Each 136 * We have one global lock that protects dev->cq/qp_table. Each
137 * struct mthca_cq/qp also has its own lock. An individual qp lock 137 * struct mthca_cq/qp also has its own lock. An individual qp lock
138 * may be taken inside of an individual cq lock. Both cqs attached to 138 * may be taken inside of an individual cq lock. Both cqs attached to
139 * a qp may be locked, with the send cq locked first. No other 139 * a qp may be locked, with the cq with the lower cqn locked first.
140 * nesting should be done. 140 * No other nesting should be done.
141 * 141 *
142 * Each struct mthca_cq/qp also has an ref count, protected by the 142 * Each struct mthca_cq/qp also has an ref count, protected by the
143 * corresponding table lock. The pointer from the cq/qp_table to the 143 * corresponding table lock. The pointer from the cq/qp_table to the
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index cd8b6721ac9c..2e8f6f36e0a5 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -99,6 +99,10 @@ enum {
99 MTHCA_QP_BIT_RSC = 1 << 3 99 MTHCA_QP_BIT_RSC = 1 << 3
100}; 100};
101 101
102enum {
103 MTHCA_SEND_DOORBELL_FENCE = 1 << 5
104};
105
102struct mthca_qp_path { 106struct mthca_qp_path {
103 __be32 port_pkey; 107 __be32 port_pkey;
104 u8 rnr_retry; 108 u8 rnr_retry;
@@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
1259 return 0; 1263 return 0;
1260} 1264}
1261 1265
1266static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1267{
1268 if (send_cq == recv_cq)
1269 spin_lock_irq(&send_cq->lock);
1270 else if (send_cq->cqn < recv_cq->cqn) {
1271 spin_lock_irq(&send_cq->lock);
1272 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1273 } else {
1274 spin_lock_irq(&recv_cq->lock);
1275 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1276 }
1277}
1278
1279static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1280{
1281 if (send_cq == recv_cq)
1282 spin_unlock_irq(&send_cq->lock);
1283 else if (send_cq->cqn < recv_cq->cqn) {
1284 spin_unlock(&recv_cq->lock);
1285 spin_unlock_irq(&send_cq->lock);
1286 } else {
1287 spin_unlock(&send_cq->lock);
1288 spin_unlock_irq(&recv_cq->lock);
1289 }
1290}
1291
1262int mthca_alloc_sqp(struct mthca_dev *dev, 1292int mthca_alloc_sqp(struct mthca_dev *dev,
1263 struct mthca_pd *pd, 1293 struct mthca_pd *pd,
1264 struct mthca_cq *send_cq, 1294 struct mthca_cq *send_cq,
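
mthca_lock_cqs()/mthca_unlock_cqs() impose a single global order on the two CQ locks, lower cqn first, matching the updated comment in mthca_provider.h, so that two paths locking the same pair of CQs can never deadlock against each other. A self-contained sketch of the same idea, with pthread mutexes standing in for the spinlocks and no IRQ disabling:

/* Deadlock-free acquisition of two locks using a total order on an ID,
 * as in mthca_lock_cqs()/mthca_unlock_cqs(). cqn stands in for the CQ number. */
#include <pthread.h>
#include <stdio.h>

struct cq {
    unsigned int cqn;
    pthread_mutex_t lock;
};

static void lock_cqs(struct cq *send, struct cq *recv)
{
    if (send == recv) {
        pthread_mutex_lock(&send->lock);
    } else if (send->cqn < recv->cqn) {
        pthread_mutex_lock(&send->lock);   /* lower cqn first */
        pthread_mutex_lock(&recv->lock);
    } else {
        pthread_mutex_lock(&recv->lock);
        pthread_mutex_lock(&send->lock);
    }
}

static void unlock_cqs(struct cq *send, struct cq *recv)
{
    if (send == recv) {
        pthread_mutex_unlock(&send->lock);
    } else if (send->cqn < recv->cqn) {
        pthread_mutex_unlock(&recv->lock); /* reverse order of lock_cqs() */
        pthread_mutex_unlock(&send->lock);
    } else {
        pthread_mutex_unlock(&send->lock);
        pthread_mutex_unlock(&recv->lock);
    }
}

int main(void)
{
    struct cq a = { 1, PTHREAD_MUTEX_INITIALIZER };
    struct cq b = { 2, PTHREAD_MUTEX_INITIALIZER };
    lock_cqs(&a, &b);
    unlock_cqs(&a, &b);
    puts("ordered lock/unlock ok");
    return 0;
}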
@@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
1311 * Lock CQs here, so that CQ polling code can do QP lookup 1341 * Lock CQs here, so that CQ polling code can do QP lookup
1312 * without taking a lock. 1342 * without taking a lock.
1313 */ 1343 */
1314 spin_lock_irq(&send_cq->lock); 1344 mthca_lock_cqs(send_cq, recv_cq);
1315 if (send_cq != recv_cq)
1316 spin_lock(&recv_cq->lock);
1317 1345
1318 spin_lock(&dev->qp_table.lock); 1346 spin_lock(&dev->qp_table.lock);
1319 mthca_array_clear(&dev->qp_table.qp, mqpn); 1347 mthca_array_clear(&dev->qp_table.qp, mqpn);
1320 spin_unlock(&dev->qp_table.lock); 1348 spin_unlock(&dev->qp_table.lock);
1321 1349
1322 if (send_cq != recv_cq) 1350 mthca_unlock_cqs(send_cq, recv_cq);
1323 spin_unlock(&recv_cq->lock);
1324 spin_unlock_irq(&send_cq->lock);
1325 1351
1326 err_out: 1352 err_out:
1327 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, 1353 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1355 * Lock CQs here, so that CQ polling code can do QP lookup 1381 * Lock CQs here, so that CQ polling code can do QP lookup
1356 * without taking a lock. 1382 * without taking a lock.
1357 */ 1383 */
1358 spin_lock_irq(&send_cq->lock); 1384 mthca_lock_cqs(send_cq, recv_cq);
1359 if (send_cq != recv_cq)
1360 spin_lock(&recv_cq->lock);
1361 1385
1362 spin_lock(&dev->qp_table.lock); 1386 spin_lock(&dev->qp_table.lock);
1363 mthca_array_clear(&dev->qp_table.qp, 1387 mthca_array_clear(&dev->qp_table.qp,
@@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1365 --qp->refcount; 1389 --qp->refcount;
1366 spin_unlock(&dev->qp_table.lock); 1390 spin_unlock(&dev->qp_table.lock);
1367 1391
1368 if (send_cq != recv_cq) 1392 mthca_unlock_cqs(send_cq, recv_cq);
1369 spin_unlock(&recv_cq->lock);
1370 spin_unlock_irq(&send_cq->lock);
1371 1393
1372 wait_event(qp->wait, !get_qp_refcount(dev, qp)); 1394 wait_event(qp->wait, !get_qp_refcount(dev, qp));
1373 1395
@@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1502 int i; 1524 int i;
1503 int size; 1525 int size;
1504 int size0 = 0; 1526 int size0 = 0;
1505 u32 f0 = 0; 1527 u32 f0;
1506 int ind; 1528 int ind;
1507 u8 op0 = 0; 1529 u8 op0 = 0;
1508 1530
@@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1686 if (!size0) { 1708 if (!size0) {
1687 size0 = size; 1709 size0 = size;
1688 op0 = mthca_opcode[wr->opcode]; 1710 op0 = mthca_opcode[wr->opcode];
1711 f0 = wr->send_flags & IB_SEND_FENCE ?
1712 MTHCA_SEND_DOORBELL_FENCE : 0;
1689 } 1713 }
1690 1714
1691 ++ind; 1715 ++ind;
@@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1843 int i; 1867 int i;
1844 int size; 1868 int size;
1845 int size0 = 0; 1869 int size0 = 0;
1846 u32 f0 = 0; 1870 u32 f0;
1847 int ind; 1871 int ind;
1848 u8 op0 = 0; 1872 u8 op0 = 0;
1849 1873
@@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2051 if (!size0) { 2075 if (!size0) {
2052 size0 = size; 2076 size0 = size;
2053 op0 = mthca_opcode[wr->opcode]; 2077 op0 = mthca_opcode[wr->opcode];
2078 f0 = wr->send_flags & IB_SEND_FENCE ?
2079 MTHCA_SEND_DOORBELL_FENCE : 0;
2054 } 2080 }
2055 2081
2056 ++ind; 2082 ++ind;
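
Both post_send paths also stop zero-initializing f0 and instead derive it from the first work request's IB_SEND_FENCE flag, so a fence requested on the first WQE is actually reflected in the doorbell. A small model of that logic; the flag values are illustrative, not the hardware encoding:

/* Derive the doorbell "fence" flag from the first WR in the chain,
 * as the two post_send hunks above now do. */
#include <stdint.h>
#include <stdio.h>

#define SEND_FENCE     (1u << 0)   /* stands in for IB_SEND_FENCE */
#define DOORBELL_FENCE (1u << 5)   /* stands in for MTHCA_SEND_DOORBELL_FENCE */

struct wr { uint32_t send_flags; struct wr *next; };

static uint32_t build_doorbell_flags(const struct wr *wr)
{
    uint32_t f0 = 0;
    int first = 1;

    for (; wr; wr = wr->next) {
        if (first) {
            /* only the first WQE's fence request is carried into the doorbell */
            f0 = (wr->send_flags & SEND_FENCE) ? DOORBELL_FENCE : 0;
            first = 0;
        }
    }
    return f0;
}

int main(void)
{
    struct wr second = { 0, NULL };
    struct wr first = { SEND_FENCE, &second };
    printf("doorbell flags: %#x\n", build_doorbell_flags(&first));
    return 0;
}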
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 34b0da5cfa0a..1437d7ee3b19 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
378 return iser_conn_set_full_featured_mode(conn); 378 return iser_conn_set_full_featured_mode(conn);
379} 379}
380 380
381static void
382iscsi_iser_conn_terminate(struct iscsi_conn *conn)
383{
384 struct iscsi_iser_conn *iser_conn = conn->dd_data;
385 struct iser_conn *ib_conn = iser_conn->ib_conn;
386
387 BUG_ON(!ib_conn);
388 /* starts conn teardown process, waits until all previously *
389 * posted buffers get flushed, deallocates all conn resources */
390 iser_conn_terminate(ib_conn);
391 iser_conn->ib_conn = NULL;
392 conn->recv_lock = NULL;
393}
394
395
396static struct iscsi_transport iscsi_iser_transport; 381static struct iscsi_transport iscsi_iser_transport;
397 382
398static struct iscsi_cls_session * 383static struct iscsi_cls_session *
@@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
555static void 540static void
556iscsi_iser_ep_disconnect(__u64 ep_handle) 541iscsi_iser_ep_disconnect(__u64 ep_handle)
557{ 542{
558 struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); 543 struct iser_conn *ib_conn;
559 544
545 ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
560 if (!ib_conn) 546 if (!ib_conn)
561 return; 547 return;
562 548
563 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); 549 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
564
565 iser_conn_terminate(ib_conn); 550 iser_conn_terminate(ib_conn);
566} 551}
567 552
@@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = {
614 .get_session_param = iscsi_session_get_param, 599 .get_session_param = iscsi_session_get_param,
615 .start_conn = iscsi_iser_conn_start, 600 .start_conn = iscsi_iser_conn_start,
616 .stop_conn = iscsi_conn_stop, 601 .stop_conn = iscsi_conn_stop,
617 /* these are called as part of conn recovery */
618 .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */
619 .terminate_conn = iscsi_iser_conn_terminate,
620 /* IO */ 602 /* IO */
621 .send_pdu = iscsi_conn_send_pdu, 603 .send_pdu = iscsi_conn_send_pdu,
622 .get_stats = iscsi_iser_conn_get_stats, 604 .get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6bfa0cf4b1d2..a86afd0a5ef1 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd)
498 i++; 498 i++;
499 dev->rep[REP_PERIOD] = period[i]; 499 dev->rep[REP_PERIOD] = period[i];
500 500
501 while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY]) 501 while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY])
502 j++; 502 j++;
503 dev->rep[REP_DELAY] = delay[j]; 503 dev->rep[REP_DELAY] = delay[j];
504 504
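
The atkbd fix bounds the delay-table scan by ARRAY_SIZE(delay) rather than ARRAY_SIZE(period), so the index can never step past the array it actually dereferences if the two tables ever differ in length. A standalone version of the clamped lookup, with made-up table contents:

/* Clamped "find first table entry >= requested value" lookup, with the
 * loop bound taken from the table being indexed. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int delay_tbl[] = { 250, 500, 750, 1000 };

static unsigned int pick_delay(unsigned int requested)
{
    unsigned int j = 0;

    while (j < ARRAY_SIZE(delay_tbl) - 1 && delay_tbl[j] < requested)
        j++;                     /* stops at the last entry at the latest */
    return delay_tbl[j];
}

int main(void)
{
    printf("%u\n", pick_delay(600));   /* -> 750 */
    printf("%u\n", pick_delay(9999));  /* clamped to 1000 */
    return 0;
}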
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index a8efc1af36cb..de0f46dd9692 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi)
259 return 1; 259 return 1;
260} 260}
261 261
262static struct key_entry keymap_empty[] __initdata = { 262static struct key_entry keymap_empty[] = {
263 { KE_END, 0 } 263 { KE_END, 0 }
264}; 264};
265 265
266static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { 266static struct key_entry keymap_fs_amilo_pro_v2000[] = {
267 { KE_KEY, 0x01, KEY_HELP }, 267 { KE_KEY, 0x01, KEY_HELP },
268 { KE_KEY, 0x11, KEY_PROG1 }, 268 { KE_KEY, 0x11, KEY_PROG1 },
269 { KE_KEY, 0x12, KEY_PROG2 }, 269 { KE_KEY, 0x12, KEY_PROG2 },
@@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
273 { KE_END, 0 } 273 { KE_END, 0 }
274}; 274};
275 275
276static struct key_entry keymap_fujitsu_n3510[] __initdata = { 276static struct key_entry keymap_fujitsu_n3510[] = {
277 { KE_KEY, 0x11, KEY_PROG1 }, 277 { KE_KEY, 0x11, KEY_PROG1 },
278 { KE_KEY, 0x12, KEY_PROG2 }, 278 { KE_KEY, 0x12, KEY_PROG2 },
279 { KE_KEY, 0x36, KEY_WWW }, 279 { KE_KEY, 0x36, KEY_WWW },
@@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] __initdata = {
285 { KE_END, 0 } 285 { KE_END, 0 }
286}; 286};
287 287
288static struct key_entry keymap_wistron_ms2111[] __initdata = { 288static struct key_entry keymap_wistron_ms2111[] = {
289 { KE_KEY, 0x11, KEY_PROG1 }, 289 { KE_KEY, 0x11, KEY_PROG1 },
290 { KE_KEY, 0x12, KEY_PROG2 }, 290 { KE_KEY, 0x12, KEY_PROG2 },
291 { KE_KEY, 0x13, KEY_PROG3 }, 291 { KE_KEY, 0x13, KEY_PROG3 },
@@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] __initdata = {
294 { KE_END, 0 } 294 { KE_END, 0 }
295}; 295};
296 296
297static struct key_entry keymap_wistron_ms2141[] __initdata = { 297static struct key_entry keymap_wistron_ms2141[] = {
298 { KE_KEY, 0x11, KEY_PROG1 }, 298 { KE_KEY, 0x11, KEY_PROG1 },
299 { KE_KEY, 0x12, KEY_PROG2 }, 299 { KE_KEY, 0x12, KEY_PROG2 },
300 { KE_WIFI, 0x30, 0 }, 300 { KE_WIFI, 0x30, 0 },
@@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] __initdata = {
307 { KE_END, 0 } 307 { KE_END, 0 }
308}; 308};
309 309
310static struct key_entry keymap_acer_aspire_1500[] __initdata = { 310static struct key_entry keymap_acer_aspire_1500[] = {
311 { KE_KEY, 0x11, KEY_PROG1 }, 311 { KE_KEY, 0x11, KEY_PROG1 },
312 { KE_KEY, 0x12, KEY_PROG2 }, 312 { KE_KEY, 0x12, KEY_PROG2 },
313 { KE_WIFI, 0x30, 0 }, 313 { KE_WIFI, 0x30, 0 },
@@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] __initdata = {
317 { KE_END, 0 } 317 { KE_END, 0 }
318}; 318};
319 319
320static struct key_entry keymap_acer_travelmate_240[] __initdata = { 320static struct key_entry keymap_acer_travelmate_240[] = {
321 { KE_KEY, 0x31, KEY_MAIL }, 321 { KE_KEY, 0x31, KEY_MAIL },
322 { KE_KEY, 0x36, KEY_WWW }, 322 { KE_KEY, 0x36, KEY_WWW },
323 { KE_KEY, 0x11, KEY_PROG1 }, 323 { KE_KEY, 0x11, KEY_PROG1 },
@@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] __initdata = {
327 { KE_END, 0 } 327 { KE_END, 0 }
328}; 328};
329 329
330static struct key_entry keymap_aopen_1559as[] __initdata = { 330static struct key_entry keymap_aopen_1559as[] = {
331 { KE_KEY, 0x01, KEY_HELP }, 331 { KE_KEY, 0x01, KEY_HELP },
332 { KE_KEY, 0x06, KEY_PROG3 }, 332 { KE_KEY, 0x06, KEY_PROG3 },
333 { KE_KEY, 0x11, KEY_PROG1 }, 333 { KE_KEY, 0x11, KEY_PROG1 },
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 8bc9f51ae6c2..343afa38f4c2 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -485,13 +485,6 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
485 param[0] = 40; 485 param[0] = 40;
486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); 486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
487 487
488 param[0] = 200;
489 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
490 param[0] = 200;
491 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
492 param[0] = 60;
493 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
494
495 if (set_properties) { 488 if (set_properties) {
496 set_bit(BTN_MIDDLE, psmouse->dev->keybit); 489 set_bit(BTN_MIDDLE, psmouse->dev->keybit);
497 set_bit(REL_WHEEL, psmouse->dev->relbit); 490 set_bit(REL_WHEEL, psmouse->dev->relbit);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index be48cedf986b..c54de989eb00 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region)
255 struct region *reg, *nreg; 255 struct region *reg, *nreg;
256 256
257 read_unlock(&rh->hash_lock); 257 read_unlock(&rh->hash_lock);
258 nreg = mempool_alloc(rh->region_pool, GFP_NOIO); 258 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
259 if (unlikely(!nreg))
260 nreg = kmalloc(sizeof(struct region), GFP_NOIO);
259 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 261 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
260 RH_CLEAN : RH_NOSYNC; 262 RH_CLEAN : RH_NOSYNC;
261 nreg->rh = rh; 263 nreg->rh = rh;
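
The region allocation above now tries the mempool without sleeping (GFP_ATOMIC) and only falls back to a blocking kmalloc when the pool is momentarily empty. A userspace model of that try-fast-then-fall-back shape, with malloc/calloc standing in for mempool_alloc()/kmalloc():

#include <stdio.h>
#include <stdlib.h>

struct region { int state; };

static struct region *alloc_fast(void)     /* stands in for mempool_alloc(GFP_ATOMIC) */
{
    return malloc(sizeof(struct region));
}

static struct region *alloc_blocking(void) /* stands in for kmalloc(GFP_NOIO) */
{
    return calloc(1, sizeof(struct region));
}

static struct region *alloc_region(void)
{
    struct region *r = alloc_fast();
    if (!r)                                 /* unlikely(): pool exhausted */
        r = alloc_blocking();
    return r;
}

int main(void)
{
    struct region *r = alloc_region();
    printf("allocated %p\n", (void *)r);
    free(r);
    return 0;
}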
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b6d16022a53e..8dbab2ef3885 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev)
1597 1597
1598repeat: 1598repeat:
1599 spin_lock_irq(&mddev->write_lock); 1599 spin_lock_irq(&mddev->write_lock);
1600
1601 if (mddev->degraded && mddev->sb_dirty == 3)
1602 /* If the array is degraded, then skipping spares is both
1603 * dangerous and fairly pointless.
1604 * Dangerous because a device that was removed from the array
1605 * might have a event_count that still looks up-to-date,
1606 * so it can be re-added without a resync.
1607 * Pointless because if there are any spares to skip,
1608 * then a recovery will happen and soon that array won't
1609 * be degraded any more and the spare can go back to sleep then.
1610 */
1611 mddev->sb_dirty = 1;
1612
1600 sync_req = mddev->in_sync; 1613 sync_req = mddev->in_sync;
1601 mddev->utime = get_seconds(); 1614 mddev->utime = get_seconds();
1602 if (mddev->sb_dirty == 3) 1615 if (mddev->sb_dirty == 3)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1efe22a2d041..87bfe9e7d8ca 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1625,15 +1625,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1625 return 0; 1625 return 0;
1626 } 1626 }
1627 1627
1628 /* before building a request, check if we can skip these blocks..
1629 * This call the bitmap_start_sync doesn't actually record anything
1630 */
1631 if (mddev->bitmap == NULL && 1628 if (mddev->bitmap == NULL &&
1632 mddev->recovery_cp == MaxSector && 1629 mddev->recovery_cp == MaxSector &&
1630 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1633 conf->fullsync == 0) { 1631 conf->fullsync == 0) {
1634 *skipped = 1; 1632 *skipped = 1;
1635 return max_sector - sector_nr; 1633 return max_sector - sector_nr;
1636 } 1634 }
1635 /* before building a request, check if we can skip these blocks..
1636 * This call the bitmap_start_sync doesn't actually record anything
1637 */
1637 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1638 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1638 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1639 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1639 /* We can skip this block, and probably several more */ 1640 /* We can skip this block, and probably several more */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d4cb144ab402..c537d71c18e4 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER
640 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
641 struct list_head fc_rports; 641 struct list_head fc_rports;
642 spinlock_t fc_rescan_work_lock; 642 spinlock_t fc_rescan_work_lock;
643 int fc_rescan_work_count;
644 struct work_struct fc_rescan_work; 643 struct work_struct fc_rescan_work;
645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 644 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
646 struct workqueue_struct *fc_rescan_work_q; 645 struct workqueue_struct *fc_rescan_work_q;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 90da7d63b08e..85696f34c310 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
669 * if still doing discovery, 669 * if still doing discovery,
670 * hang loose a while until finished 670 * hang loose a while until finished
671 */ 671 */
672 if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) { 672 if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
673 (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
674 (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
675 == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
673 if (count-- > 0) { 676 if (count-- > 0) {
674 msleep(100); 677 msleep(100);
675 goto try_again; 678 goto try_again;
@@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg)
895{ 898{
896 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 899 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
897 int ii; 900 int ii;
898 int work_to_do;
899 u64 pn; 901 u64 pn;
900 unsigned long flags;
901 struct mptfc_rport_info *ri; 902 struct mptfc_rport_info *ri;
902 903
903 do { 904 /* start by tagging all ports as missing */
904 /* start by tagging all ports as missing */ 905 list_for_each_entry(ri, &ioc->fc_rports, list) {
905 list_for_each_entry(ri, &ioc->fc_rports, list) { 906 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
906 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { 907 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
907 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
908 }
909 } 908 }
909 }
910 910
911 /* 911 /*
912 * now rescan devices known to adapter, 912 * now rescan devices known to adapter,
913 * will reregister existing rports 913 * will reregister existing rports
914 */ 914 */
915 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { 915 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
916 (void) mptfc_GetFcPortPage0(ioc, ii); 916 (void) mptfc_GetFcPortPage0(ioc, ii);
917 mptfc_init_host_attr(ioc,ii); /* refresh */ 917 mptfc_init_host_attr(ioc, ii); /* refresh */
918 mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); 918 mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
919 } 919 }
920 920
921 /* delete devices still missing */ 921 /* delete devices still missing */
922 list_for_each_entry(ri, &ioc->fc_rports, list) { 922 list_for_each_entry(ri, &ioc->fc_rports, list) {
923 /* if newly missing, delete it */ 923 /* if newly missing, delete it */
924 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { 924 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
925 925
926 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| 926 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
927 MPT_RPORT_INFO_FLAGS_MISSING); 927 MPT_RPORT_INFO_FLAGS_MISSING);
928 fc_remote_port_delete(ri->rport); /* won't sleep */ 928 fc_remote_port_delete(ri->rport); /* won't sleep */
929 ri->rport = NULL; 929 ri->rport = NULL;
930 930
931 pn = (u64)ri->pg0.WWPN.High << 32 | 931 pn = (u64)ri->pg0.WWPN.High << 32 |
932 (u64)ri->pg0.WWPN.Low; 932 (u64)ri->pg0.WWPN.Low;
933 dfcprintk ((MYIOC_s_INFO_FMT 933 dfcprintk ((MYIOC_s_INFO_FMT
934 "mptfc_rescan.%d: %llx deleted\n", 934 "mptfc_rescan.%d: %llx deleted\n",
935 ioc->name, 935 ioc->name,
936 ioc->sh->host_no, 936 ioc->sh->host_no,
937 (unsigned long long)pn)); 937 (unsigned long long)pn));
938 }
939 } 938 }
940 939 }
941 /*
942 * allow multiple passes as target state
943 * might have changed during scan
944 */
945 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
946 if (ioc->fc_rescan_work_count > 2) /* only need one more */
947 ioc->fc_rescan_work_count = 2;
948 work_to_do = --ioc->fc_rescan_work_count;
949 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
950 } while (work_to_do);
951} 940}
952 941
953static int 942static int
@@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1159 * by doing it via the workqueue, some locking is eliminated 1148 * by doing it via the workqueue, some locking is eliminated
1160 */ 1149 */
1161 1150
1162 ioc->fc_rescan_work_count = 1;
1163 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); 1151 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
1164 flush_workqueue(ioc->fc_rescan_work_q); 1152 flush_workqueue(ioc->fc_rescan_work_q);
1165 1153
@@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
1202 case MPI_EVENT_RESCAN: 1190 case MPI_EVENT_RESCAN:
1203 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 1191 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
1204 if (ioc->fc_rescan_work_q) { 1192 if (ioc->fc_rescan_work_q) {
1205 if (ioc->fc_rescan_work_count++ == 0) { 1193 queue_work(ioc->fc_rescan_work_q,
1206 queue_work(ioc->fc_rescan_work_q, 1194 &ioc->fc_rescan_work);
1207 &ioc->fc_rescan_work);
1208 }
1209 } 1195 }
1210 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 1196 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
1211 break; 1197 break;
@@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
1248 mptfc_SetFcPortPage1_defaults(ioc); 1234 mptfc_SetFcPortPage1_defaults(ioc);
1249 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 1235 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
1250 if (ioc->fc_rescan_work_q) { 1236 if (ioc->fc_rescan_work_q) {
1251 if (ioc->fc_rescan_work_count++ == 0) { 1237 queue_work(ioc->fc_rescan_work_q,
1252 queue_work(ioc->fc_rescan_work_q, 1238 &ioc->fc_rescan_work);
1253 &ioc->fc_rescan_work);
1254 }
1255 } 1239 }
1256 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 1240 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
1257 } 1241 }
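
With the rescan loop and fc_rescan_work_count removed, both the event and reset paths simply queue fc_rescan_work every time; queue_work() declines to queue an item that is already pending, so a burst of rescan events still results in at most one queued pass plus the one in progress. A toy model of that behaviour, where the pending flag mimics what the workqueue core tracks:

/* Minimal model of why the work counter could go away: a work item that is
 * already pending is not queued twice, so repeated events cost one rescan. */
#include <stdio.h>

struct work { int pending; int runs; };

static int queue_work_model(struct work *w)   /* mimics queue_work() semantics */
{
    if (w->pending)
        return 0;          /* already queued: nothing to do */
    w->pending = 1;
    return 1;
}

static void run_work(struct work *w)
{
    w->pending = 0;
    w->runs++;
}

int main(void)
{
    struct work rescan = { 0, 0 };
    queue_work_model(&rescan);   /* MPI_EVENT_RESCAN */
    queue_work_model(&rescan);   /* second event while still queued */
    run_work(&rescan);
    printf("rescans actually run: %d\n", rescan.runs);  /* 1 */
    return 0;
}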
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index d7897dc6b3c8..a0ba07c36ee9 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -130,11 +130,13 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
130 if (ctrl & NAND_CTRL_CHANGE) { 130 if (ctrl & NAND_CTRL_CHANGE) {
131 unsigned long bits; 131 unsigned long bits;
132 132
133 bits = (~ctrl & NAND_NCE) << 2; 133 bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
134 bits |= (ctrl & NAND_CLE) << 7; 134 bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
135 bits |= (ctrl & NAND_ALE) << 6; 135 bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
136 136
137 ams_delta_latch2_write(0xC2, bits); 137 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
138 AMS_DELTA_LATCH2_NAND_ALE |
139 AMS_DELTA_LATCH2_NAND_NCE, bits);
138 } 140 }
139 141
140 if (cmd != NAND_CMD_NONE) 142 if (cmd != NAND_CMD_NONE)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 62b861304e03..c8cbc00243fe 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1093,9 +1093,10 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1093 1093
1094 ret = nand_do_read_ops(mtd, from, &chip->ops); 1094 ret = nand_do_read_ops(mtd, from, &chip->ops);
1095 1095
1096 *retlen = chip->ops.retlen;
1097
1096 nand_release_device(mtd); 1098 nand_release_device(mtd);
1097 1099
1098 *retlen = chip->ops.retlen;
1099 return ret; 1100 return ret;
1100} 1101}
1101 1102
@@ -1691,9 +1692,10 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1691 1692
1692 ret = nand_do_write_ops(mtd, to, &chip->ops); 1693 ret = nand_do_write_ops(mtd, to, &chip->ops);
1693 1694
1695 *retlen = chip->ops.retlen;
1696
1694 nand_release_device(mtd); 1697 nand_release_device(mtd);
1695 1698
1696 *retlen = chip->ops.retlen;
1697 return ret; 1699 return ret;
1698} 1700}
1699 1701
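
Both nand_read() and nand_write() now copy chip->ops.retlen into *retlen before nand_release_device(): once the device lock is dropped, another caller may reuse chip->ops and overwrite the count. A userspace model of the copy-out-before-unlock rule, with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct chip {
    pthread_mutex_t lock;
    size_t op_retlen;          /* stands in for chip->ops.retlen */
};

static int chip_read(struct chip *c, size_t len, size_t *retlen)
{
    pthread_mutex_lock(&c->lock);      /* nand_get_device() */
    c->op_retlen = len;                /* the operation fills this in */
    *retlen = c->op_retlen;            /* copy out before releasing */
    pthread_mutex_unlock(&c->lock);    /* nand_release_device() */
    return 0;
}

int main(void)
{
    struct chip c = { PTHREAD_MUTEX_INITIALIZER, 0 };
    size_t got;
    chip_read(&c, 512, &got);
    printf("read %zu bytes\n", got);
    return 0;
}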
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 3c148eaf2f4d..8a60f391ffcf 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM
76 76
77config HOTPLUG_PCI_ACPI 77config HOTPLUG_PCI_ACPI
78 tristate "ACPI PCI Hotplug driver" 78 tristate "ACPI PCI Hotplug driver"
79 depends on ACPI_DOCK && HOTPLUG_PCI 79 depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI)
80 help 80 help
81 Say Y here if you have a system that supports PCI Hotplug using 81 Say Y here if you have a system that supports PCI Hotplug using
82 ACPI. 82 ACPI.
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 02be74caa89f..4afcaffd031c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot)
254 254
255int cpci_configure_slot(struct slot* slot) 255int cpci_configure_slot(struct slot* slot)
256{ 256{
257 unsigned char busnr; 257 struct pci_bus *parent;
258 struct pci_bus *child; 258 int fn;
259 259
260 dbg("%s - enter", __FUNCTION__); 260 dbg("%s - enter", __FUNCTION__);
261 261
@@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot)
276 */ 276 */
277 n = pci_scan_slot(slot->bus, slot->devfn); 277 n = pci_scan_slot(slot->bus, slot->devfn);
278 dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n); 278 dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
279 if (n > 0)
280 pci_bus_add_devices(slot->bus);
281 slot->dev = pci_get_slot(slot->bus, slot->devfn); 279 slot->dev = pci_get_slot(slot->bus, slot->devfn);
282 if (slot->dev == NULL) { 280 if (slot->dev == NULL) {
283 err("Could not find PCI device for slot %02x", slot->number); 281 err("Could not find PCI device for slot %02x", slot->number);
284 return 1; 282 return -ENODEV;
285 } 283 }
286 } 284 }
287 285 parent = slot->dev->bus;
288 if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 286
289 pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr); 287 for (fn = 0; fn < 8; fn++) {
290 child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr); 288 struct pci_dev *dev;
291 pci_do_scan_bus(child); 289
292 pci_bus_size_bridges(child); 290 dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
291 if (!dev)
292 continue;
293 if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
294 (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
295 /* Find an unused bus number for the new bridge */
296 struct pci_bus *child;
297 unsigned char busnr, start = parent->secondary;
298 unsigned char end = parent->subordinate;
299
300 for (busnr = start; busnr <= end; busnr++) {
301 if (!pci_find_bus(pci_domain_nr(parent),
302 busnr))
303 break;
304 }
305 if (busnr >= end) {
306 err("No free bus for hot-added bridge\n");
307 pci_dev_put(dev);
308 continue;
309 }
310 child = pci_add_new_bus(parent, dev, busnr);
311 if (!child) {
312 err("Cannot add new bus for %s\n",
313 pci_name(dev));
314 pci_dev_put(dev);
315 continue;
316 }
317 child->subordinate = pci_do_scan_bus(child);
318 pci_bus_size_bridges(child);
319 }
320 pci_dev_put(dev);
293 } 321 }
294 322
295 pci_bus_assign_resources(slot->dev->bus); 323 pci_bus_assign_resources(parent);
324 pci_bus_add_devices(parent);
325 pci_enable_bridges(parent);
296 326
297 dbg("%s - exit", __FUNCTION__); 327 dbg("%s - exit", __FUNCTION__);
298 return 0; 328 return 0;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 10e1a905c144..474e9cd0e9e4 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
139/** 139/**
140 * pci_match_device - Tell if a PCI device structure has a matching 140 * pci_match_device - Tell if a PCI device structure has a matching
141 * PCI device id structure 141 * PCI device id structure
142 * @ids: array of PCI device id structures to search in
143 * @dev: the PCI device structure to match against
144 * @drv: the PCI driver to match against 142 * @drv: the PCI driver to match against
143 * @dev: the PCI device structure to match against
145 * 144 *
146 * Used by a driver to check whether a PCI device present in the 145 * Used by a driver to check whether a PCI device present in the
147 * system is in its list of supported devices. Returns the matching 146 * system is in its list of supported devices. Returns the matching
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fb08bc951ac0..73177429fe74 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
438 pci_read_config_dword(dev, 0x48, &region); 438 pci_read_config_dword(dev, 0x48, &region);
439 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); 439 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
440} 440}
441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi );
441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); 442DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi );
442 443
443/* 444/*
@@ -1091,7 +1092,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
1091DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); 1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); 1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); 1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );
1095 1095
1096static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) 1096static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1097{ 1097{
@@ -1518,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1518} 1518}
1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); 1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
1520 1520
1521static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1522{
1523 u16 command;
1524 u32 bar;
1525 u8 __iomem *csr;
1526 u8 cmd_hi;
1527
1528 switch (dev->device) {
1529 /* PCI IDs taken from drivers/net/e100.c */
1530 case 0x1029:
1531 case 0x1030 ... 0x1034:
1532 case 0x1038 ... 0x103E:
1533 case 0x1050 ... 0x1057:
1534 case 0x1059:
1535 case 0x1064 ... 0x106B:
1536 case 0x1091 ... 0x1095:
1537 case 0x1209:
1538 case 0x1229:
1539 case 0x2449:
1540 case 0x2459:
1541 case 0x245D:
1542 case 0x27DC:
1543 break;
1544 default:
1545 return;
1546 }
1547
1548 /*
1549 * Some firmware hands off the e100 with interrupts enabled,
1550 * which can cause a flood of interrupts if packets are
1551 * received before the driver attaches to the device. So
1552 * disable all e100 interrupts here. The driver will
1553 * re-enable them when it's ready.
1554 */
1555 pci_read_config_word(dev, PCI_COMMAND, &command);
1556 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
1557
1558 if (!(command & PCI_COMMAND_MEMORY) || !bar)
1559 return;
1560
1561 csr = ioremap(bar, 8);
1562 if (!csr) {
1563 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
1564 pci_name(dev));
1565 return;
1566 }
1567
1568 cmd_hi = readb(csr + 3);
1569 if (cmd_hi == 0) {
1570 printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts "
1571 "enabled, disabling\n", pci_name(dev));
1572 writeb(1, csr + 3);
1573 }
1574
1575 iounmap(csr);
1576}
1577DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
1521 1578
1522static void __devinit fixup_rev1_53c810(struct pci_dev* dev) 1579static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
1523{ 1580{
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index d6d1bff52b8e..2c7de79c83b9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to)
69 69
70 pr_debug("%s: aie=%d\n", __FUNCTION__, to); 70 pr_debug("%s: aie=%d\n", __FUNCTION__, to);
71 71
72 tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 72 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
73 73
74 if (to) 74 if (to)
75 tmp |= S3C2410_RTCALM_ALMEN; 75 tmp |= S3C2410_RTCALM_ALMEN;
76 76
77 writeb(tmp, S3C2410_RTCALM); 77 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
78} 78}
79 79
80static void s3c_rtc_setpie(int to) 80static void s3c_rtc_setpie(int to)
@@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to)
84 pr_debug("%s: pie=%d\n", __FUNCTION__, to); 84 pr_debug("%s: pie=%d\n", __FUNCTION__, to);
85 85
86 spin_lock_irq(&s3c_rtc_pie_lock); 86 spin_lock_irq(&s3c_rtc_pie_lock);
87 tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; 87 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
88 88
89 if (to) 89 if (to)
90 tmp |= S3C2410_TICNT_ENABLE; 90 tmp |= S3C2410_TICNT_ENABLE;
91 91
92 writeb(tmp, S3C2410_TICNT); 92 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
93 spin_unlock_irq(&s3c_rtc_pie_lock); 93 spin_unlock_irq(&s3c_rtc_pie_lock);
94} 94}
95 95
@@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq)
98 unsigned int tmp; 98 unsigned int tmp;
99 99
100 spin_lock_irq(&s3c_rtc_pie_lock); 100 spin_lock_irq(&s3c_rtc_pie_lock);
101 tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE; 101 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
102 102
103 s3c_rtc_freq = freq; 103 s3c_rtc_freq = freq;
104 104
105 tmp |= (128 / freq)-1; 105 tmp |= (128 / freq)-1;
106 106
107 writeb(tmp, S3C2410_TICNT); 107 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
108 spin_unlock_irq(&s3c_rtc_pie_lock); 108 spin_unlock_irq(&s3c_rtc_pie_lock);
109} 109}
110 110
@@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq)
113static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 113static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
114{ 114{
115 unsigned int have_retried = 0; 115 unsigned int have_retried = 0;
116 void __iomem *base = s3c_rtc_base;
116 117
117 retry_get_time: 118 retry_get_time:
118 rtc_tm->tm_min = readb(S3C2410_RTCMIN); 119 rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
119 rtc_tm->tm_hour = readb(S3C2410_RTCHOUR); 120 rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
120 rtc_tm->tm_mday = readb(S3C2410_RTCDATE); 121 rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE);
121 rtc_tm->tm_mon = readb(S3C2410_RTCMON); 122 rtc_tm->tm_mon = readb(base + S3C2410_RTCMON);
122 rtc_tm->tm_year = readb(S3C2410_RTCYEAR); 123 rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
123 rtc_tm->tm_sec = readb(S3C2410_RTCSEC); 124 rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
124 125
125 /* the only way to work out wether the system was mid-update 126 /* the only way to work out wether the system was mid-update
126 * when we read it is to check the second counter, and if it 127 * when we read it is to check the second counter, and if it
@@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
151 152
152static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) 153static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
153{ 154{
154 /* the rtc gets round the y2k problem by just not supporting it */ 155 void __iomem *base = s3c_rtc_base;
156 int year = tm->tm_year - 100;
155 157
156 if (tm->tm_year < 100) 158 pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
159 tm->tm_year, tm->tm_mon, tm->tm_mday,
160 tm->tm_hour, tm->tm_min, tm->tm_sec);
161
162 /* we get around y2k by simply not supporting it */
163
164 if (year < 0 || year >= 100) {
165 dev_err(dev, "rtc only supports 100 years\n");
157 return -EINVAL; 166 return -EINVAL;
167 }
158 168
159 writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC); 169 writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC);
160 writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN); 170 writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN);
161 writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR); 171 writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR);
162 writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE); 172 writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE);
163 writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON); 173 writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON);
164 writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR); 174 writeb(BIN2BCD(year), base + S3C2410_RTCYEAR);
165 175
166 return 0; 176 return 0;
167} 177}
@@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
169static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) 179static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
170{ 180{
171 struct rtc_time *alm_tm = &alrm->time; 181 struct rtc_time *alm_tm = &alrm->time;
182 void __iomem *base = s3c_rtc_base;
172 unsigned int alm_en; 183 unsigned int alm_en;
173 184
174 alm_tm->tm_sec = readb(S3C2410_ALMSEC); 185 alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
175 alm_tm->tm_min = readb(S3C2410_ALMMIN); 186 alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
176 alm_tm->tm_hour = readb(S3C2410_ALMHOUR); 187 alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
177 alm_tm->tm_mon = readb(S3C2410_ALMMON); 188 alm_tm->tm_mon = readb(base + S3C2410_ALMMON);
178 alm_tm->tm_mday = readb(S3C2410_ALMDATE); 189 alm_tm->tm_mday = readb(base + S3C2410_ALMDATE);
179 alm_tm->tm_year = readb(S3C2410_ALMYEAR); 190 alm_tm->tm_year = readb(base + S3C2410_ALMYEAR);
180 191
181 alm_en = readb(S3C2410_RTCALM); 192 alm_en = readb(base + S3C2410_RTCALM);
182 193
183 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", 194 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
184 alm_en, 195 alm_en,
@@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
226static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) 237static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
227{ 238{
228 struct rtc_time *tm = &alrm->time; 239 struct rtc_time *tm = &alrm->time;
240 void __iomem *base = s3c_rtc_base;
229 unsigned int alrm_en; 241 unsigned int alrm_en;
230 242
231 pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", 243 pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
@@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
234 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); 246 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
235 247
236 248
237 alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 249 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
238 writeb(0x00, S3C2410_RTCALM); 250 writeb(0x00, base + S3C2410_RTCALM);
239 251
240 if (tm->tm_sec < 60 && tm->tm_sec >= 0) { 252 if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
241 alrm_en |= S3C2410_RTCALM_SECEN; 253 alrm_en |= S3C2410_RTCALM_SECEN;
242 writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC); 254 writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC);
243 } 255 }
244 256
245 if (tm->tm_min < 60 && tm->tm_min >= 0) { 257 if (tm->tm_min < 60 && tm->tm_min >= 0) {
246 alrm_en |= S3C2410_RTCALM_MINEN; 258 alrm_en |= S3C2410_RTCALM_MINEN;
247 writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN); 259 writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN);
248 } 260 }
249 261
250 if (tm->tm_hour < 24 && tm->tm_hour >= 0) { 262 if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
251 alrm_en |= S3C2410_RTCALM_HOUREN; 263 alrm_en |= S3C2410_RTCALM_HOUREN;
252 writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR); 264 writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR);
253 } 265 }
254 266
255 pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en); 267 pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
256 268
257 writeb(alrm_en, S3C2410_RTCALM); 269 writeb(alrm_en, base + S3C2410_RTCALM);
258 270
259 if (0) { 271 if (0) {
260 alrm_en = readb(S3C2410_RTCALM); 272 alrm_en = readb(base + S3C2410_RTCALM);
261 alrm_en &= ~S3C2410_RTCALM_ALMEN; 273 alrm_en &= ~S3C2410_RTCALM_ALMEN;
262 writeb(alrm_en, S3C2410_RTCALM); 274 writeb(alrm_en, base + S3C2410_RTCALM);
263 disable_irq_wake(s3c_rtc_alarmno); 275 disable_irq_wake(s3c_rtc_alarmno);
264 } 276 }
265 277
@@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev,
319 331
320static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 332static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
321{ 333{
322 unsigned int rtcalm = readb(S3C2410_RTCALM); 334 unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
323 unsigned int ticnt = readb (S3C2410_TICNT); 335 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
324 336
325 seq_printf(seq, "alarm_IRQ\t: %s\n", 337 seq_printf(seq, "alarm_IRQ\t: %s\n",
326 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" ); 338 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
@@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = {
387 399
388static void s3c_rtc_enable(struct platform_device *pdev, int en) 400static void s3c_rtc_enable(struct platform_device *pdev, int en)
389{ 401{
402 void __iomem *base = s3c_rtc_base;
390 unsigned int tmp; 403 unsigned int tmp;
391 404
392 if (s3c_rtc_base == NULL) 405 if (s3c_rtc_base == NULL)
393 return; 406 return;
394 407
395 if (!en) { 408 if (!en) {
396 tmp = readb(S3C2410_RTCCON); 409 tmp = readb(base + S3C2410_RTCCON);
397 writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON); 410 writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
398 411
399 tmp = readb(S3C2410_TICNT); 412 tmp = readb(base + S3C2410_TICNT);
400 writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT); 413 writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
401 } else { 414 } else {
402 /* re-enable the device, and check it is ok */ 415 /* re-enable the device, and check it is ok */
403 416
404 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ 417 if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
405 dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); 418 dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
406 419
407 tmp = readb(S3C2410_RTCCON); 420 tmp = readb(base + S3C2410_RTCCON);
408 writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON); 421 writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
409 } 422 }
410 423
411 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ 424 if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
412 dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n"); 425 dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
413 426
414 tmp = readb(S3C2410_RTCCON); 427 tmp = readb(base + S3C2410_RTCCON);
415 writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON); 428 writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
416 } 429 }
417 430
418 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ 431 if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
419 dev_info(&pdev->dev, "removing RTCCON_CLKRST\n"); 432 dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
420 433
421 tmp = readb(S3C2410_RTCCON); 434 tmp = readb(base + S3C2410_RTCCON);
422 writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON); 435 writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
423 } 436 }
424 } 437 }
425} 438}
@@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
475 } 488 }
476 489
477 s3c_rtc_mem = request_mem_region(res->start, 490 s3c_rtc_mem = request_mem_region(res->start,
478 res->end-res->start+1, 491 res->end-res->start+1,
479 pdev->name); 492 pdev->name);
480 493
481 if (s3c_rtc_mem == NULL) { 494 if (s3c_rtc_mem == NULL) {
482 dev_err(&pdev->dev, "failed to reserve memory region\n"); 495 dev_err(&pdev->dev, "failed to reserve memory region\n");
@@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
495 508
496 s3c_rtc_enable(pdev, 1); 509 s3c_rtc_enable(pdev, 1);
497 510
498 pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON)); 511 pr_debug("s3c2410_rtc: RTCCON=%02x\n",
512 readb(s3c_rtc_base + S3C2410_RTCCON));
499 513
500 s3c_rtc_setfreq(s3c_rtc_freq); 514 s3c_rtc_setfreq(s3c_rtc_freq);
501 515
@@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
543 557
544 /* save TICNT for anyone using periodic interrupts */ 558 /* save TICNT for anyone using periodic interrupts */
545 559
546 ticnt_save = readb(S3C2410_TICNT); 560 ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
547 561
548 /* calculate time delta for suspend */ 562 /* calculate time delta for suspend */
549 563
@@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
567 rtc_tm_to_time(&tm, &time.tv_sec); 581 rtc_tm_to_time(&tm, &time.tv_sec);
568 restore_time_delta(&s3c_rtc_delta, &time); 582 restore_time_delta(&s3c_rtc_delta, &time);
569 583
570 writeb(ticnt_save, S3C2410_TICNT); 584 writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
571 return 0; 585 return 0;
572} 586}
573#else 587#else
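
Every RTC register access above gains a s3c_rtc_base term because the S3C2410_* macros are offsets into the ioremapped register block, not absolute addresses; the old readb(S3C2410_RTCALM) form dereferenced the bare offset. A standalone model of offset-based MMIO access, with a plain buffer standing in for the mapped window and made-up offsets:

#include <stdint.h>
#include <stdio.h>

#define RTCALM   0x50          /* illustrative offset, not the real register map */
#define ALMEN    (1u << 6)

static uint8_t regs[0x100];    /* stands in for the ioremap()ed RTC block */
static uint8_t *rtc_base = regs;

static uint8_t rtc_readb(unsigned int off)             { return rtc_base[off]; }
static void    rtc_writeb(uint8_t v, unsigned int off) { rtc_base[off] = v; }

static void rtc_setaie(int on)
{
    uint8_t tmp = rtc_readb(RTCALM) & ~ALMEN;     /* base + offset, always */
    if (on)
        tmp |= ALMEN;
    rtc_writeb(tmp, RTCALM);
}

int main(void)
{
    rtc_setaie(1);
    printf("RTCALM=%#x\n", rtc_readb(RTCALM));
    return 0;
}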
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9d0c6e1a0e66..9af02c79ce8a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -54,11 +54,11 @@ struct dasd_devmap {
54 */ 54 */
55struct dasd_server_ssid_map { 55struct dasd_server_ssid_map {
56 struct list_head list; 56 struct list_head list;
57 struct server_id { 57 struct system_id {
58 char vendor[4]; 58 char vendor[4];
59 char serial[15]; 59 char serial[15];
60 __u16 ssid;
60 } sid; 61 } sid;
61 __u16 ssid;
62}; 62};
63 63
64static struct list_head dasd_server_ssid_list; 64static struct list_head dasd_server_ssid_list;
@@ -904,14 +904,14 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
904 return -ENOMEM; 904 return -ENOMEM;
905 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); 905 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
906 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1); 906 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
907 srv->ssid = uid->ssid; 907 srv->sid.ssid = uid->ssid;
908 908
909 /* server is already contained ? */ 909 /* server is already contained ? */
910 spin_lock(&dasd_devmap_lock); 910 spin_lock(&dasd_devmap_lock);
911 devmap->uid = *uid; 911 devmap->uid = *uid;
912 list_for_each_entry(tmp, &dasd_server_ssid_list, list) { 912 list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
913 if (!memcmp(&srv->sid, &tmp->sid, 913 if (!memcmp(&srv->sid, &tmp->sid,
914 sizeof(struct dasd_server_ssid_map))) { 914 sizeof(struct system_id))) {
915 kfree(srv); 915 kfree(srv);
916 srv = NULL; 916 srv = NULL;
917 break; 917 break;
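
Moving ssid into the embedded struct and switching the memcmp() to sizeof(struct system_id) makes the duplicate check compare exactly the identifying fields; the old call started at the embedded id but used the size of the whole container, so it read past the end of the object and compared unrelated bytes. A small illustration with simplified types:

/* Compare exactly the identifying sub-struct, not the enclosing node. */
#include <string.h>
#include <stdio.h>

struct system_id {
    char vendor[4];
    char serial[15];
    unsigned short ssid;
};

struct server_map {
    struct server_map *next;   /* stands in for the list_head */
    struct system_id sid;
};

static int same_server(const struct server_map *a, const struct server_map *b)
{
    return !memcmp(&a->sid, &b->sid, sizeof(struct system_id));
}

int main(void)
{
    struct server_map a = { 0 }, b = { 0 };
    memcpy(a.sid.vendor, "IBM", 4);
    memcpy(b.sid.vendor, "IBM", 4);
    a.sid.ssid = b.sid.ssid = 7;
    printf("same: %d\n", same_server(&a, &b));
    return 0;
}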
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 957ed5db98e4..b7a7fac3f7c3 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device)
607 * Valide storage server of current device. 607 * Valide storage server of current device.
608 */ 608 */
609static int 609static int
610dasd_eckd_validate_server(struct dasd_device *device) 610dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
611{ 611{
612 int rc; 612 int rc;
613 613
@@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device)
616 return 0; 616 return 0;
617 617
618 rc = dasd_eckd_psf_ssc(device); 618 rc = dasd_eckd_psf_ssc(device);
619 if (rc) 619 /* may be requested feature is not available on server,
620 /* may be requested feature is not available on server, 620 * therefore just report error and go ahead */
621 * therefore just report error and go ahead */ 621 DEV_MESSAGE(KERN_INFO, device,
622 DEV_MESSAGE(KERN_INFO, device, 622 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
623 "Perform Subsystem Function returned rc=%d", rc); 623 uid->vendor, uid->serial, uid->ssid, rc);
624 /* RE-Read Configuration Data */ 624 /* RE-Read Configuration Data */
625 return dasd_eckd_read_conf(device); 625 return dasd_eckd_read_conf(device);
626} 626}
@@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
666 return rc; 666 return rc;
667 rc = dasd_set_uid(device->cdev, &uid); 667 rc = dasd_set_uid(device->cdev, &uid);
668 if (rc == 1) /* new server found */ 668 if (rc == 1) /* new server found */
669 rc = dasd_eckd_validate_server(device); 669 rc = dasd_eckd_validate_server(device, &uid);
670 if (rc) 670 if (rc)
671 return rc; 671 return rc;
672 672
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cd789b8acd4..adc9d8f2c28f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
112 printk("\n"); 112 printk("\n");
113} 113}
114 114
115
116/****************************************************************/
117/****** Functions to handle the request ID hash table ********/
118/****************************************************************/
119
120#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
121
122static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
123{
124 int i;
125
126 adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
127 GFP_KERNEL);
128
129 if (!adapter->req_list)
130 return -ENOMEM;
131
132 for (i=0; i<REQUEST_LIST_SIZE; i++)
133 INIT_LIST_HEAD(&adapter->req_list[i]);
134
135 return 0;
136}
137
138static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
139{
140 struct zfcp_fsf_req *request, *tmp;
141 unsigned int i;
142
143 for (i=0; i<REQUEST_LIST_SIZE; i++) {
144 if (list_empty(&adapter->req_list[i]))
145 continue;
146
147 list_for_each_entry_safe(request, tmp,
148 &adapter->req_list[i], list)
149 list_del(&request->list);
150 }
151
152 kfree(adapter->req_list);
153}
154
155void zfcp_reqlist_add(struct zfcp_adapter *adapter,
156 struct zfcp_fsf_req *fsf_req)
157{
158 unsigned int i;
159
160 i = fsf_req->req_id % REQUEST_LIST_SIZE;
161 list_add_tail(&fsf_req->list, &adapter->req_list[i]);
162}
163
164void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
165{
166 struct zfcp_fsf_req *request, *tmp;
167 unsigned int i, counter;
168 u64 dbg_tmp[2];
169
170 i = req_id % REQUEST_LIST_SIZE;
171 BUG_ON(list_empty(&adapter->req_list[i]));
172
173 counter = 0;
174 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
175 if (request->req_id == req_id) {
176 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
177 dbg_tmp[1] = (u64) counter;
178 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
179 list_del(&request->list);
180 break;
181 }
182 counter++;
183 }
184}
185
186struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
187 unsigned long req_id)
188{
189 struct zfcp_fsf_req *request, *tmp;
190 unsigned int i;
191
192 i = req_id % REQUEST_LIST_SIZE;
193
194 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
195 if (request->req_id == req_id)
196 return request;
197
198 return NULL;
199}
200
201int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
202{
203 unsigned int i;
204
205 for (i=0; i<REQUEST_LIST_SIZE; i++)
206 if (!list_empty(&adapter->req_list[i]))
207 return 0;
208
209 return 1;
210}
211
212#undef ZFCP_LOG_AREA
213
115/****************************************************************/ 214/****************************************************************/
116/************** Uncategorised Functions *************************/ 215/************** Uncategorised Functions *************************/
117/****************************************************************/ 216/****************************************************************/
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
961 INIT_LIST_HEAD(&adapter->port_remove_lh); 1060 INIT_LIST_HEAD(&adapter->port_remove_lh);
962 1061
963 /* initialize list of fsf requests */ 1062 /* initialize list of fsf requests */
964 spin_lock_init(&adapter->fsf_req_list_lock); 1063 spin_lock_init(&adapter->req_list_lock);
965 INIT_LIST_HEAD(&adapter->fsf_req_list_head); 1064 retval = zfcp_reqlist_init(adapter);
1065 if (retval) {
1066 ZFCP_LOG_INFO("request list initialization failed\n");
1067 goto failed_low_mem_buffers;
1068 }
966 1069
967 /* initialize debug locks */ 1070 /* initialize debug locks */
968 1071
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1041 * !0 - struct zfcp_adapter data structure could not be removed 1144 * !0 - struct zfcp_adapter data structure could not be removed
1042 * (e.g. still used) 1145 * (e.g. still used)
1043 * locks: adapter list write lock is assumed to be held by caller 1146 * locks: adapter list write lock is assumed to be held by caller
1044 * adapter->fsf_req_list_lock is taken and released within this
1045 * function and must not be held on entry
1046 */ 1147 */
1047void 1148void
1048zfcp_adapter_dequeue(struct zfcp_adapter *adapter) 1149zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1054 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); 1155 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1055 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 1156 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
1056 /* sanity check: no pending FSF requests */ 1157 /* sanity check: no pending FSF requests */
1057 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 1158 spin_lock_irqsave(&adapter->req_list_lock, flags);
1058 retval = !list_empty(&adapter->fsf_req_list_head); 1159 retval = zfcp_reqlist_isempty(adapter);
1059 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 1160 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
1060 if (retval) { 1161 if (!retval) {
1061 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " 1162 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
1062 "%i requests outstanding\n", 1163 "%i requests outstanding\n",
1063 zfcp_get_busid_by_adapter(adapter), adapter, 1164 zfcp_get_busid_by_adapter(adapter), adapter,
1064 atomic_read(&adapter->fsf_reqs_active)); 1165 atomic_read(&adapter->reqs_active));
1065 retval = -EBUSY; 1166 retval = -EBUSY;
1066 goto out; 1167 goto out;
1067 } 1168 }
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1087 zfcp_free_low_mem_buffers(adapter); 1188 zfcp_free_low_mem_buffers(adapter);
1088 /* free memory of adapter data structure and queues */ 1189 /* free memory of adapter data structure and queues */
1089 zfcp_qdio_free_queues(adapter); 1190 zfcp_qdio_free_queues(adapter);
1191 zfcp_reqlist_free(adapter);
1090 kfree(adapter->fc_stats); 1192 kfree(adapter->fc_stats);
1091 kfree(adapter->stats_reset_data); 1193 kfree(adapter->stats_reset_data);
1092 ZFCP_LOG_TRACE("freeing adapter structure\n"); 1194 ZFCP_LOG_TRACE("freeing adapter structure\n");
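
The zfcp_aux.c hunks above add lookup helpers that hash each FSF request ID into one of REQUEST_LIST_SIZE buckets and walk only that bucket. As a rough, hedged illustration of the same bucketed-list idea, here is a minimal standalone userspace sketch; NBUCKETS, struct req, req_add and req_find are invented names for the demo, not driver symbols:

#include <stdio.h>

#define NBUCKETS 128                    /* stands in for REQUEST_LIST_SIZE */

struct req {
	unsigned long id;
	struct req *next;               /* singly linked for brevity */
};

static struct req *buckets[NBUCKETS];

/* insert at the head of the bucket selected by id % NBUCKETS */
static void req_add(struct req *r)
{
	unsigned int i = r->id % NBUCKETS;

	r->next = buckets[i];
	buckets[i] = r;
}

/* walk only the one bucket the id hashes to */
static struct req *req_find(unsigned long id)
{
	struct req *r;

	for (r = buckets[id % NBUCKETS]; r; r = r->next)
		if (r->id == id)
			return r;
	return NULL;                    /* unknown id */
}

int main(void)
{
	struct req a = { 7, NULL }, b = { 7 + NBUCKETS, NULL };

	req_add(&a);
	req_add(&b);                    /* hashes to the same bucket as 'a' */
	printf("lookup %lu -> %s\n", b.id, req_find(b.id) ? "found" : "missing");
	printf("lookup 5 -> %s\n", req_find(5) ? "found" : "missing");
	return 0;
}

The point of the bucketing is that a completion only scans the short list its ID hashes to, instead of one long adapter-wide list.
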
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 57d8e4bfb8d9..fdabadeaa9ee 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
164 retval = zfcp_adapter_scsi_register(adapter); 164 retval = zfcp_adapter_scsi_register(adapter);
165 if (retval) 165 if (retval)
166 goto out_scsi_register; 166 goto out_scsi_register;
167
168 /* initialize request counter */
169 BUG_ON(!zfcp_reqlist_isempty(adapter));
170 adapter->req_no = 0;
171
167 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, 172 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
168 ZFCP_SET); 173 ZFCP_SET);
169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 174 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2df512a18e2c..94d1b74db356 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -52,7 +52,7 @@
52/********************* GENERAL DEFINES *********************************/ 52/********************* GENERAL DEFINES *********************************/
53 53
54/* zfcp version number, it consists of major, minor, and patch-level number */ 54/* zfcp version number, it consists of major, minor, and patch-level number */
55#define ZFCP_VERSION "4.7.0" 55#define ZFCP_VERSION "4.8.0"
56 56
57/** 57/**
58 * zfcp_sg_to_address - determine kernel address from struct scatterlist 58 * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
80#define REQUEST_LIST_SIZE 128 80#define REQUEST_LIST_SIZE 128
81 81
82/********************* SCSI SPECIFIC DEFINES *********************************/ 82/********************* SCSI SPECIFIC DEFINES *********************************/
83#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) 83#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
84 84
85/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ 85/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
86 86
@@ -886,11 +886,11 @@ struct zfcp_adapter {
886 struct list_head port_remove_lh; /* head of ports to be 886 struct list_head port_remove_lh; /* head of ports to be
887 removed */ 887 removed */
888 u32 ports; /* number of remote ports */ 888 u32 ports; /* number of remote ports */
889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */ 889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
890 struct list_head fsf_req_list_head; /* head of FSF req list */ 890 atomic_t reqs_active; /* # active FSF reqs */
891 spinlock_t fsf_req_list_lock; /* lock for ops on list of 891 unsigned long req_no; /* unique FSF req number */
892 FSF requests */ 892 struct list_head *req_list; /* list of pending reqs */
893 atomic_t fsf_reqs_active; /* # active FSF reqs */ 893 spinlock_t req_list_lock; /* request list lock */
894 struct zfcp_qdio_queue request_queue; /* request queue */ 894 struct zfcp_qdio_queue request_queue; /* request queue */
895 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 895 u32 fsf_req_seq_no; /* FSF cmnd seq number */
896 wait_queue_head_t request_wq; /* can be used to wait for 896 wait_queue_head_t request_wq; /* can be used to wait for
@@ -986,6 +986,7 @@ struct zfcp_unit {
986/* FSF request */ 986/* FSF request */
987struct zfcp_fsf_req { 987struct zfcp_fsf_req {
988 struct list_head list; /* list of FSF requests */ 988 struct list_head list; /* list of FSF requests */
989 unsigned long req_id; /* unique request ID */
989 struct zfcp_adapter *adapter; /* adapter request belongs to */ 990 struct zfcp_adapter *adapter; /* adapter request belongs to */
990 u8 sbal_number; /* nr of SBALs free for use */ 991 u8 sbal_number; /* nr of SBALs free for use */
991 u8 sbal_first; /* first SBAL for this request */ 992 u8 sbal_first; /* first SBAL for this request */
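
The zfcp_def.h hunk turns the single pending-request list into a table: the adapter now carries a req_list pointer, a req_no counter and a req_list_lock, and each request carries its own req_id. zfcp_reqlist_init()/zfcp_reqlist_free() are only referenced in this excerpt, so the following is a speculative userspace analogue of allocating and freeing such a bucket table; struct node, table_init and table_free are illustrative names only:

#include <stdlib.h>

#define NBUCKETS 128                     /* REQUEST_LIST_SIZE in the driver */

struct node {                            /* stands in for struct list_head */
	struct node *next, *prev;
};

/* allocate the bucket array and make every entry an empty circular list */
static struct node *table_init(void)
{
	struct node *tbl = malloc(sizeof(*tbl) * NBUCKETS);
	unsigned int i;

	if (!tbl)
		return NULL;             /* caller treats this as out of memory */
	for (i = 0; i < NBUCKETS; i++) {
		tbl[i].next = &tbl[i];
		tbl[i].prev = &tbl[i];
	}
	return tbl;
}

static void table_free(struct node *tbl)
{
	free(tbl);                       /* all buckets must be empty by now */
}

int main(void)
{
	struct node *tbl = table_init();

	if (!tbl)
		return 1;
	table_free(tbl);
	return 0;
}
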
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8ec8da0beaa8..7f60b6fdf724 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); 64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); 65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); 66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
67static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); 67static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
68static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); 68static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); 69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); 93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); 94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
95 95
96static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); 96static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
97static int zfcp_erp_action_dismiss_port(struct zfcp_port *); 97static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
98static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); 98static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
99static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
100 99
101static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, 100static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
102 struct zfcp_port *, struct zfcp_unit *); 101 struct zfcp_port *, struct zfcp_unit *);
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
135 zfcp_erp_adapter_reopen(adapter, 0); 134 zfcp_erp_adapter_reopen(adapter, 0);
136} 135}
137 136
138/* 137/**
139 * function: zfcp_fsf_scsi_er_timeout_handler 138 * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
140 * 139 *
141 * purpose: This function needs to be called whenever a SCSI error recovery 140 * This function needs to be called whenever a SCSI error recovery
142 * action (abort/reset) does not return. 141 * action (abort/reset) does not return. Re-opening the adapter means
143 * Re-opening the adapter means that the command can be returned 142 * that the abort/reset command can be returned by zfcp. It won't complete
144 * by zfcp (it is guarranteed that it does not return via the 143 * via the adapter anymore (because qdio queues are closed). If ERP is
145 * adapter anymore). The buffer can then be used again. 144 * already running on this adapter it will be stopped.
146 *
147 * returns: sod all
148 */ 145 */
149void 146void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
150zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
151{ 147{
152 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 148 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
149 unsigned long flags;
153 150
154 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " 151 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
155 "Restarting all operations on the adapter %s\n", 152 "Restarting all operations on the adapter %s\n",
156 zfcp_get_busid_by_adapter(adapter)); 153 zfcp_get_busid_by_adapter(adapter));
157 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); 154 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
158 zfcp_erp_adapter_reopen(adapter, 0);
159 155
160 return; 156 write_lock_irqsave(&adapter->erp_lock, flags);
157 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
158 &adapter->status)) {
159 zfcp_erp_modify_adapter_status(adapter,
160 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
161 ZFCP_CLEAR);
162 zfcp_erp_action_dismiss_adapter(adapter);
163 write_unlock_irqrestore(&adapter->erp_lock, flags);
164 /* dismiss all pending requests including requests for ERP */
165 zfcp_fsf_req_dismiss_all(adapter);
166 adapter->fsf_req_seq_no = 0;
167 } else
168 write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0);
161} 170}
162 171
163/* 172/*
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
670 return retval; 679 return retval;
671} 680}
672 681
673/* 682/**
674 * function: 683 * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
675 *
676 * purpose: disable I/O,
677 * return any open requests and clean them up,
678 * aim: no pending and incoming I/O
679 *
680 * returns:
681 */ 684 */
682static void 685static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
683zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
684{ 686{
685 debug_text_event(adapter->erp_dbf, 6, "a_bl"); 687 debug_text_event(adapter->erp_dbf, 6, "a_bl");
686 zfcp_erp_modify_adapter_status(adapter, 688 zfcp_erp_modify_adapter_status(adapter,
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
688 clear_mask, ZFCP_CLEAR); 690 clear_mask, ZFCP_CLEAR);
689} 691}
690 692
691/* 693/**
692 * function: 694 * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
693 *
694 * purpose: enable I/O
695 *
696 * returns:
697 */ 695 */
698static void 696static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
699zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
700{ 697{
701 debug_text_event(adapter->erp_dbf, 6, "a_ubl"); 698 debug_text_event(adapter->erp_dbf, 6, "a_ubl");
702 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 699 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
848 struct zfcp_adapter *adapter = erp_action->adapter; 845 struct zfcp_adapter *adapter = erp_action->adapter;
849 846
850 if (erp_action->fsf_req) { 847 if (erp_action->fsf_req) {
851 /* take lock to ensure that request is not being deleted meanwhile */ 848 /* take lock to ensure that request is not deleted meanwhile */
852 spin_lock(&adapter->fsf_req_list_lock); 849 spin_lock(&adapter->req_list_lock);
 853 /* check whether fsf req does still exist */ 850 fsf_req = zfcp_reqlist_ismember(adapter,
 854 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 851 erp_action->fsf_req->req_id);
 855 if (fsf_req == erp_action->fsf_req) 852 if (fsf_req && (fsf_req->erp_action == erp_action)) {
856 break;
857 if (fsf_req && (fsf_req->erp_action == erp_action)) {
858 /* fsf_req still exists */ 853 /* fsf_req still exists */
859 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 854 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
860 debug_event(adapter->erp_dbf, 3, &fsf_req, 855 debug_event(adapter->erp_dbf, 3, &fsf_req,
861 sizeof (unsigned long)); 856 sizeof (unsigned long));
862 /* dismiss fsf_req of timed out or dismissed erp_action */ 857 /* dismiss fsf_req of timed out/dismissed erp_action */
863 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | 858 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
864 ZFCP_STATUS_ERP_TIMEDOUT)) { 859 ZFCP_STATUS_ERP_TIMEDOUT)) {
865 debug_text_event(adapter->erp_dbf, 3, 860 debug_text_event(adapter->erp_dbf, 3,
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
892 */ 887 */
893 erp_action->fsf_req = NULL; 888 erp_action->fsf_req = NULL;
894 } 889 }
895 spin_unlock(&adapter->fsf_req_list_lock); 890 spin_unlock(&adapter->req_list_lock);
896 } else 891 } else
897 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); 892 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
898 893
899 return retval; 894 return retval;
900} 895}
901 896
902/* 897/**
903 * purpose: generic handler for asynchronous events related to erp_action events 898 * zfcp_erp_async_handler_nolock - complete erp_action
904 * (normal completion, time-out, dismissing, retry after
905 * low memory condition)
906 *
907 * note: deletion of timer is not required (e.g. in case of a time-out),
908 * but a second try does no harm,
909 * we leave it in here to allow for greater simplification
910 * 899 *
911 * returns: 0 - there was an action to handle 900 * Used for normal completion, time-out, dismissal and failure after
912 * !0 - otherwise 901 * low memory condition.
913 */ 902 */
914static int 903static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
915zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, 904 unsigned long set_mask)
916 unsigned long set_mask)
917{ 905{
918 int retval;
919 struct zfcp_adapter *adapter = erp_action->adapter; 906 struct zfcp_adapter *adapter = erp_action->adapter;
920 907
921 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { 908 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
926 del_timer(&erp_action->timer); 913 del_timer(&erp_action->timer);
927 erp_action->status |= set_mask; 914 erp_action->status |= set_mask;
928 zfcp_erp_action_ready(erp_action); 915 zfcp_erp_action_ready(erp_action);
929 retval = 0;
930 } else { 916 } else {
931 /* action is ready or gone - nothing to do */ 917 /* action is ready or gone - nothing to do */
932 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); 918 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
933 debug_event(adapter->erp_dbf, 3, &erp_action->action, 919 debug_event(adapter->erp_dbf, 3, &erp_action->action,
934 sizeof (int)); 920 sizeof (int));
935 retval = 1;
936 } 921 }
937
938 return retval;
939} 922}
940 923
941/* 924/**
942 * purpose: generic handler for asynchronous events related to erp_action 925 * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
943 * events (normal completion, time-out, dismissing, retry after
944 * low memory condition)
945 *
946 * note: deletion of timer is not required (e.g. in case of a time-out),
947 * but a second try does no harm,
948 * we leave it in here to allow for greater simplification
949 *
950 * returns: 0 - there was an action to handle
951 * !0 - otherwise
952 */ 926 */
953int 927void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
954zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, 928 unsigned long set_mask)
955 unsigned long set_mask)
956{ 929{
957 struct zfcp_adapter *adapter = erp_action->adapter; 930 struct zfcp_adapter *adapter = erp_action->adapter;
958 unsigned long flags; 931 unsigned long flags;
959 int retval;
960 932
961 write_lock_irqsave(&adapter->erp_lock, flags); 933 write_lock_irqsave(&adapter->erp_lock, flags);
962 retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); 934 zfcp_erp_async_handler_nolock(erp_action, set_mask);
963 write_unlock_irqrestore(&adapter->erp_lock, flags); 935 write_unlock_irqrestore(&adapter->erp_lock, flags);
964
965 return retval;
966} 936}
967 937
968/* 938/*
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
999 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); 969 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
1000} 970}
1001 971
1002/* 972/**
1003 * purpose: is called for an erp_action which needs to be ended 973 * zfcp_erp_action_dismiss - dismiss an erp_action
1004 * though not being done,
1005 * this is usually required if an higher is generated,
1006 * action gets an appropriate flag and will be processed
1007 * accordingly
1008 * 974 *
1009 * locks: erp_lock held (thus we need to call another handler variant) 975 * adapter->erp_lock must be held
976 *
977 * Dismissal of an erp_action is usually required if an erp_action of
978 * higher priority is generated.
1010 */ 979 */
1011static int 980static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1012zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1013{ 981{
1014 struct zfcp_adapter *adapter = erp_action->adapter; 982 struct zfcp_adapter *adapter = erp_action->adapter;
1015 983
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1017 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); 985 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1018 986
1019 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); 987 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
1020
1021 return 0;
1022} 988}
1023 989
1024int 990int
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2074 return retval; 2040 return retval;
2075} 2041}
2076 2042
2077/* 2043/**
2078 * function: zfcp_qdio_cleanup 2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2079 *
2080 * purpose: cleans up QDIO operation for the specified adapter
2081 *
2082 * returns: 0 - successful cleanup
2083 * !0 - failed cleanup
2084 */ 2045 */
2085int 2046static void
2086zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) 2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2087{ 2048{
2088 int retval = ZFCP_ERP_SUCCEEDED;
2089 int first_used; 2049 int first_used;
2090 int used_count; 2050 int used_count;
2091 struct zfcp_adapter *adapter = erp_action->adapter; 2051 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2094 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " 2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2095 "queues on adapter %s\n", 2055 "queues on adapter %s\n",
2096 zfcp_get_busid_by_adapter(adapter)); 2056 zfcp_get_busid_by_adapter(adapter));
2097 retval = ZFCP_ERP_FAILED; 2057 return;
2098 goto out;
2099 } 2058 }
2100 2059
2101 /* 2060 /*
2102 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that 2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2103 * do_QDIO won't be called while qdio_shutdown is in progress. 2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2104 */ 2063 */
2105
2106 write_lock_irq(&adapter->request_queue.queue_lock); 2064 write_lock_irq(&adapter->request_queue.queue_lock);
2107 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2108 write_unlock_irq(&adapter->request_queue.queue_lock); 2066 write_unlock_irq(&adapter->request_queue.queue_lock);
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2134 adapter->request_queue.free_index = 0; 2092 adapter->request_queue.free_index = 0;
2135 atomic_set(&adapter->request_queue.free_count, 0); 2093 atomic_set(&adapter->request_queue.free_count, 0);
2136 adapter->request_queue.distance_from_int = 0; 2094 adapter->request_queue.distance_from_int = 0;
2137 out:
2138 return retval;
2139} 2095}
2140 2096
2141static int 2097static int
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2258 "%s)\n", zfcp_get_busid_by_adapter(adapter)); 2214 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2259 ret = ZFCP_ERP_FAILED; 2215 ret = ZFCP_ERP_FAILED;
2260 } 2216 }
2261 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { 2217
2262 ZFCP_LOG_INFO("error: exchange port data failed (adapter " 2218 /* don't treat as error for the sake of compatibility */
2219 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
2220 ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
2263 "%s\n", zfcp_get_busid_by_adapter(adapter)); 2221 "%s\n", zfcp_get_busid_by_adapter(adapter));
2264 ret = ZFCP_ERP_FAILED;
2265 }
2266 2222
2267 return ret; 2223 return ret;
2268} 2224}
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2292 return retval; 2248 return retval;
2293} 2249}
2294 2250
2295/* 2251/**
2296 * function: zfcp_fsf_cleanup 2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2297 *
2298 * purpose: cleanup FSF operation for specified adapter
2299 *
2300 * returns: 0 - FSF operation successfully cleaned up
2301 * !0 - failed to cleanup FSF operation for this adapter
2302 */ 2253 */
2303static int 2254static void
2304zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) 2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2305{ 2256{
2306 int retval = ZFCP_ERP_SUCCEEDED;
2307 struct zfcp_adapter *adapter = erp_action->adapter; 2257 struct zfcp_adapter *adapter = erp_action->adapter;
2308 2258
2309 /* 2259 /*
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2317 /* all ports and units are closed */ 2267 /* all ports and units are closed */
2318 zfcp_erp_modify_adapter_status(adapter, 2268 zfcp_erp_modify_adapter_status(adapter,
2319 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2320
2321 return retval;
2322} 2270}
2323 2271
2324/* 2272/*
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3293} 3241}
3294 3242
3295 3243
3296static int 3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3297zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3298{ 3245{
3299 int retval = 0;
3300 struct zfcp_port *port; 3246 struct zfcp_port *port;
3301 3247
3302 debug_text_event(adapter->erp_dbf, 5, "a_actab"); 3248 debug_text_event(adapter->erp_dbf, 5, "a_actab");
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3305 else 3251 else
3306 list_for_each_entry(port, &adapter->port_list_head, list) 3252 list_for_each_entry(port, &adapter->port_list_head, list)
3307 zfcp_erp_action_dismiss_port(port); 3253 zfcp_erp_action_dismiss_port(port);
3308
3309 return retval;
3310} 3254}
3311 3255
3312static int 3256static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3313zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3314{ 3257{
3315 int retval = 0;
3316 struct zfcp_unit *unit; 3258 struct zfcp_unit *unit;
3317 struct zfcp_adapter *adapter = port->adapter; 3259 struct zfcp_adapter *adapter = port->adapter;
3318 3260
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3323 else 3265 else
3324 list_for_each_entry(unit, &port->unit_list_head, list) 3266 list_for_each_entry(unit, &port->unit_list_head, list)
3325 zfcp_erp_action_dismiss_unit(unit); 3267 zfcp_erp_action_dismiss_unit(unit);
3326
3327 return retval;
3328} 3268}
3329 3269
3330static int 3270static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3331zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3332{ 3271{
3333 int retval = 0;
3334 struct zfcp_adapter *adapter = unit->port->adapter; 3272 struct zfcp_adapter *adapter = unit->port->adapter;
3335 3273
3336 debug_text_event(adapter->erp_dbf, 5, "u_actab"); 3274 debug_text_event(adapter->erp_dbf, 5, "u_actab");
3337 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); 3275 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
3338 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) 3276 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
3339 zfcp_erp_action_dismiss(&unit->erp_action); 3277 zfcp_erp_action_dismiss(&unit->erp_action);
3340
3341 return retval;
3342} 3278}
3343 3279
3344static inline void 3280static inline void
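
The reworked SCSI error-recovery timeout handler above tests ZFCP_STATUS_ADAPTER_ERP_PENDING under erp_lock, then drops the lock before the potentially long-running dismissal of outstanding requests. A minimal pthread sketch of that check-under-lock / act-after-unlock pattern is shown below; erp_pending, dismiss_all and reopen are placeholders for the demo, not driver functions:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t erp_lock = PTHREAD_RWLOCK_INITIALIZER;
static int erp_pending;                 /* stands in for the ERP_PENDING bit */

static void dismiss_all(void) { puts("dismiss all pending requests"); }
static void reopen(void)      { puts("reopen adapter"); }

static void er_timeout_handler(void)
{
	int pending;

	pthread_rwlock_wrlock(&erp_lock);
	pending = erp_pending;
	if (pending)
		erp_pending = 0;        /* stop the recovery that is running */
	pthread_rwlock_unlock(&erp_lock);

	/* the potentially blocking cleanup runs without the lock held */
	if (pending)
		dismiss_all();
	reopen();                       /* always restart recovery afterwards */
}

int main(void)
{
	erp_pending = 1;
	er_timeout_handler();
	return 0;
}

Dropping the lock before dismiss_all() mirrors the driver's ordering: the dismissal may complete requests, and that must not happen with the recovery lock still held.
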
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d02366004cdd..146d7a2b4c4a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
63extern void zfcp_qdio_free_queues(struct zfcp_adapter *); 63extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, 64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
65 struct zfcp_fsf_req *); 65 struct zfcp_fsf_req *);
66extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
67 66
68extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req 67extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
69 (struct zfcp_fsf_req *, int, int); 68 (struct zfcp_fsf_req *, int, int);
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
140extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
141extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
142extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 143
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 145extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
156extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 156extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
157extern int zfcp_erp_thread_kill(struct zfcp_adapter *); 157extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
158extern int zfcp_erp_wait(struct zfcp_adapter *); 158extern int zfcp_erp_wait(struct zfcp_adapter *);
159extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); 159extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
160 160
161extern int zfcp_test_link(struct zfcp_port *); 161extern int zfcp_test_link(struct zfcp_port *);
162 162
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
190 struct zfcp_fsf_req *); 190 struct zfcp_fsf_req *);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 192 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
194extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
195extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
196 unsigned long);
197extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
193 198
194#endif /* ZFCP_EXT_H */ 199#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 31db2b06faba..ff2eacf5ec8c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, 49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
50 struct fsf_link_down_info *); 50 struct fsf_link_down_info *);
51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); 51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
52static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
53 52
54/* association between FSF command and FSF QTCB type */ 53/* association between FSF command and FSF QTCB type */
55static u32 fsf_qtcb_type[] = { 54static u32 fsf_qtcb_type[] = {
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
146 kfree(fsf_req); 145 kfree(fsf_req);
147} 146}
148 147
149/* 148/**
150 * function: 149 * zfcp_fsf_req_dismiss - dismiss a single fsf request
151 *
152 * purpose:
153 *
154 * returns:
155 *
156 * note: qdio queues shall be down (no ongoing inbound processing)
157 */ 150 */
158int 151static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
159zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 152 struct zfcp_fsf_req *fsf_req,
153 unsigned int counter)
160{ 154{
161 struct zfcp_fsf_req *fsf_req, *tmp; 155 u64 dbg_tmp[2];
162 unsigned long flags;
163 LIST_HEAD(remove_queue);
164 156
165 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 157 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
166 list_splice_init(&adapter->fsf_req_list_head, &remove_queue); 158 dbg_tmp[1] = (u64) counter;
167 atomic_set(&adapter->fsf_reqs_active, 0); 159 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
168 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 160 list_del(&fsf_req->list);
169 161 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
170 list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { 162 zfcp_fsf_req_complete(fsf_req);
171 list_del(&fsf_req->list);
172 zfcp_fsf_req_dismiss(fsf_req);
173 }
174
175 return 0;
176} 163}
177 164
178/* 165/**
179 * function: 166 * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
180 *
181 * purpose:
182 *
183 * returns:
184 */ 167 */
185static void 168int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
186zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
187{ 169{
188 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 170 struct zfcp_fsf_req *request, *tmp;
189 zfcp_fsf_req_complete(fsf_req); 171 unsigned long flags;
172 unsigned int i, counter;
173
174 spin_lock_irqsave(&adapter->req_list_lock, flags);
175 atomic_set(&adapter->reqs_active, 0);
176 for (i=0; i<REQUEST_LIST_SIZE; i++) {
177 if (list_empty(&adapter->req_list[i]))
178 continue;
179
180 counter = 0;
181 list_for_each_entry_safe(request, tmp,
182 &adapter->req_list[i], list) {
183 zfcp_fsf_req_dismiss(adapter, request, counter);
184 counter++;
185 }
186 }
187 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
188
189 return 0;
190} 190}
191 191
192/* 192/*
@@ -4592,12 +4592,14 @@ static inline void
4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4593{ 4593{
4594 if (likely(fsf_req->qtcb != NULL)) { 4594 if (likely(fsf_req->qtcb != NULL)) {
4595 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; 4595 fsf_req->qtcb->prefix.req_seq_no =
4596 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4596 fsf_req->adapter->fsf_req_seq_no;
4597 fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
4597 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4598 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4598 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; 4599 fsf_req->qtcb->prefix.qtcb_type =
4600 fsf_qtcb_type[fsf_req->fsf_command];
4599 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4601 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4600 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4602 fsf_req->qtcb->header.req_handle = fsf_req->req_id;
4601 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; 4603 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4602 } 4604 }
4603} 4605}
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4654{ 4656{
4655 volatile struct qdio_buffer_element *sbale; 4657 volatile struct qdio_buffer_element *sbale;
4656 struct zfcp_fsf_req *fsf_req = NULL; 4658 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4657 int ret = 0; 4660 int ret = 0;
4658 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4659 4662
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4668 4671
4669 fsf_req->adapter = adapter; 4672 fsf_req->adapter = adapter;
4670 fsf_req->fsf_command = fsf_cmd; 4673 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list);
4675
4676 /* unique request id */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags);
4678 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4671 4680
4672 zfcp_fsf_req_qtcb_init(fsf_req); 4681 zfcp_fsf_req_qtcb_init(fsf_req);
4673 4682
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4707 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 4716 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4708 4717
4709 /* setup common SBALE fields */ 4718 /* setup common SBALE fields */
4710 sbale[0].addr = fsf_req; 4719 sbale[0].addr = (void *) fsf_req->req_id;
4711 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 4720 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
4712 if (likely(fsf_req->qtcb != NULL)) { 4721 if (likely(fsf_req->qtcb != NULL)) {
4713 sbale[1].addr = (void *) fsf_req->qtcb; 4722 sbale[1].addr = (void *) fsf_req->qtcb;
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4747 volatile struct qdio_buffer_element *sbale; 4756 volatile struct qdio_buffer_element *sbale;
4748 int inc_seq_no; 4757 int inc_seq_no;
4749 int new_distance_from_int; 4758 int new_distance_from_int;
4750 unsigned long flags; 4759 u64 dbg_tmp[2];
4751 int retval = 0; 4760 int retval = 0;
4752 4761
4753 adapter = fsf_req->adapter; 4762 adapter = fsf_req->adapter;
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4761 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4770 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4762 sbale[1].length); 4771 sbale[1].length);
4763 4772
4764 /* put allocated FSF request at list tail */ 4773 /* put allocated FSF request into hash table */
4765 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4774 spin_lock(&adapter->req_list_lock);
4766 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4775 zfcp_reqlist_add(adapter, fsf_req);
4767 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4776 spin_unlock(&adapter->req_list_lock);
4768 4777
4769 inc_seq_no = (fsf_req->qtcb != NULL); 4778 inc_seq_no = (fsf_req->qtcb != NULL);
4770 4779
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4803 QDIO_FLAG_SYNC_OUTPUT, 4812 QDIO_FLAG_SYNC_OUTPUT,
4804 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4813 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
4805 4814
4815 dbg_tmp[0] = (unsigned long) sbale[0].addr;
4816 dbg_tmp[1] = (u64) retval;
4817 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
4818
4806 if (unlikely(retval)) { 4819 if (unlikely(retval)) {
4807 /* Queues are down..... */ 4820 /* Queues are down..... */
4808 retval = -EIO; 4821 retval = -EIO;
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4812 */ 4825 */
4813 if (timer) 4826 if (timer)
4814 del_timer(timer); 4827 del_timer(timer);
4815 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4828 spin_lock(&adapter->req_list_lock);
4816 list_del(&fsf_req->list); 4829 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4817 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4830 spin_unlock(&adapter->req_list_lock);
4818 /* 4831 /* undo changes in request queue made for this request */
4819 * adjust the number of free SBALs in request queue as well as
4820 * position of first one
4821 */
4822 zfcp_qdio_zero_sbals(req_queue->buffer, 4832 zfcp_qdio_zero_sbals(req_queue->buffer,
4823 fsf_req->sbal_first, fsf_req->sbal_number); 4833 fsf_req->sbal_first, fsf_req->sbal_number);
4824 atomic_add(fsf_req->sbal_number, &req_queue->free_count); 4834 atomic_add(fsf_req->sbal_number, &req_queue->free_count);
4825 req_queue->free_index -= fsf_req->sbal_number; /* increase */ 4835 req_queue->free_index -= fsf_req->sbal_number;
4826 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; 4836 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
4827 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ 4837 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
4828 ZFCP_LOG_DEBUG 4838 zfcp_erp_adapter_reopen(adapter, 0);
4829 ("error: do_QDIO failed. Buffers could not be enqueued "
4830 "to request queue.\n");
4831 } else { 4839 } else {
4832 req_queue->distance_from_int = new_distance_from_int; 4840 req_queue->distance_from_int = new_distance_from_int;
4833 /* 4841 /*
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4843 adapter->fsf_req_seq_no++; 4851 adapter->fsf_req_seq_no++;
4844 4852
4845 /* count FSF requests pending */ 4853 /* count FSF requests pending */
4846 atomic_inc(&adapter->fsf_reqs_active); 4854 atomic_inc(&adapter->reqs_active);
4847 } 4855 }
4848 return retval; 4856 return retval;
4849} 4857}
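
In the zfcp_fsf.c hunks, zfcp_fsf_req_create() now draws a unique req_id from adapter->req_no while holding req_list_lock, and that number (rather than a kernel pointer) is what gets handed to the hardware in the SBALE. A small pthread sketch of handing out monotonically increasing IDs under a lock follows; next_id and alloc_id are invented names for the illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_id;           /* plays the role of adapter->req_no */

/* every caller gets a distinct id, even when racing from several threads */
static unsigned long alloc_id(void)
{
	unsigned long id;

	pthread_mutex_lock(&id_lock);
	id = next_id++;
	pthread_mutex_unlock(&id_lock);
	return id;
}

int main(void)
{
	printf("first id:  %lu\n", alloc_id());
	printf("second id: %lu\n", alloc_id());
	return 0;
}
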
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 49ea5add4abc..dbd9f48e863e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 return; 282 return;
283} 283}
284 284
285/**
286 * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status
287 */
288static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
289 unsigned long req_id)
290{
291 struct zfcp_fsf_req *fsf_req;
292 unsigned long flags;
293
294 debug_long_event(adapter->erp_dbf, 4, req_id);
295
296 spin_lock_irqsave(&adapter->req_list_lock, flags);
297 fsf_req = zfcp_reqlist_ismember(adapter, req_id);
298
299 if (!fsf_req) {
300 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 301 ZFCP_LOG_NORMAL("error: unknown request id (%lu).\n", req_id);
302 zfcp_erp_adapter_reopen(adapter, 0);
303 return -EINVAL;
304 }
305
306 zfcp_reqlist_remove(adapter, req_id);
307 atomic_dec(&adapter->reqs_active);
308 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
309
310 /* finish the FSF request */
311 zfcp_fsf_req_complete(fsf_req);
312
313 return 0;
314}
315
285/* 316/*
286 * function: zfcp_qdio_response_handler 317 * function: zfcp_qdio_response_handler
287 * 318 *
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
344 /* look for QDIO request identifiers in SB */ 375 /* look for QDIO request identifiers in SB */
345 buffere = &buffer->element[buffere_index]; 376 buffere = &buffer->element[buffere_index];
346 retval = zfcp_qdio_reqid_check(adapter, 377 retval = zfcp_qdio_reqid_check(adapter,
347 (void *) buffere->addr); 378 (unsigned long) buffere->addr);
348 379
349 if (retval) { 380 if (retval) {
350 ZFCP_LOG_NORMAL("bug: unexpected inbound " 381 ZFCP_LOG_NORMAL("bug: unexpected inbound "
@@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
415 return; 446 return;
416} 447}
417 448
418/*
419 * function: zfcp_qdio_reqid_check
420 *
421 * purpose: checks for valid reqids or unsolicited status
422 *
423 * returns: 0 - valid request id or unsolicited status
424 * !0 - otherwise
425 */
426int
427zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
428{
429 struct zfcp_fsf_req *fsf_req;
430 unsigned long flags;
431
432 /* invalid (per convention used in this driver) */
433 if (unlikely(!sbale_addr)) {
434 ZFCP_LOG_NORMAL("bug: invalid reqid\n");
435 return -EINVAL;
436 }
437
438 /* valid request id and thus (hopefully :) valid fsf_req address */
439 fsf_req = (struct zfcp_fsf_req *) sbale_addr;
440
441 /* serialize with zfcp_fsf_req_dismiss_all */
442 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
443 if (list_empty(&adapter->fsf_req_list_head)) {
444 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
445 return 0;
446 }
447 list_del(&fsf_req->list);
448 atomic_dec(&adapter->fsf_reqs_active);
449 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
450
451 if (unlikely(adapter != fsf_req->adapter)) {
452 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
453 "fsf_req->adapter=%p, adapter=%p)\n",
454 fsf_req, fsf_req->adapter, adapter);
455 return -EINVAL;
456 }
457
458 /* finish the FSF request */
459 zfcp_fsf_req_complete(fsf_req);
460
461 return 0;
462}
463
464/** 449/**
465 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue 450 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
466 * @queue: queue from which SBALE should be returned 451 * @queue: queue from which SBALE should be returned
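
The new zfcp_qdio_reqid_check() above no longer trusts the value from the hardware as a pointer: it looks the ID up in the request table, treats an unknown ID as an error that triggers adapter recovery, and only then removes and completes the request. A compact standalone sketch of that validate-then-complete flow is shown below; MAX_PENDING, struct req, pending[] and reqid_check are illustrative names, and the flat array stands in for the driver's hashed list:

#include <stdio.h>
#include <stddef.h>

#define MAX_PENDING 4

struct req { unsigned long id; int done; };

static struct req *pending[MAX_PENDING];        /* stand-in for the req table */
static int active;

/* called when the hardware reports completion of request 'id' */
static int reqid_check(unsigned long id)
{
	unsigned int i;

	for (i = 0; i < MAX_PENDING; i++) {
		if (pending[i] && pending[i]->id == id) {
			struct req *r = pending[i];

			pending[i] = NULL;      /* remove before completing */
			active--;
			r->done = 1;            /* "complete" the request */
			return 0;
		}
	}
	/* unknown id: in the driver this triggers an adapter reopen */
	fprintf(stderr, "error: unknown request id (%lu)\n", id);
	return -1;
}

int main(void)
{
	struct req r = { 42, 0 };

	pending[0] = &r;
	active = 1;
	printf("known id:   %d (done=%d, active=%d)\n",
	       reqid_check(42), r.done, active);
	printf("unknown id: %d\n", reqid_check(99));
	return 0;
}
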
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 671f4a6a5d18..1bb55086db9f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
30 void (*done) (struct scsi_cmnd *)); 30 void (*done) (struct scsi_cmnd *));
31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); 31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
33static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
34static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 33static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
35static int zfcp_task_management_function(struct zfcp_unit *, u8, 34static int zfcp_task_management_function(struct zfcp_unit *, u8,
36 struct scsi_cmnd *); 35 struct scsi_cmnd *);
@@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = {
46 .scsi_host_template = { 45 .scsi_host_template = {
47 .name = ZFCP_NAME, 46 .name = ZFCP_NAME,
48 .proc_name = "zfcp", 47 .proc_name = "zfcp",
49 .proc_info = NULL,
50 .detect = NULL,
51 .slave_alloc = zfcp_scsi_slave_alloc, 48 .slave_alloc = zfcp_scsi_slave_alloc,
52 .slave_configure = zfcp_scsi_slave_configure, 49 .slave_configure = zfcp_scsi_slave_configure,
53 .slave_destroy = zfcp_scsi_slave_destroy, 50 .slave_destroy = zfcp_scsi_slave_destroy,
54 .queuecommand = zfcp_scsi_queuecommand, 51 .queuecommand = zfcp_scsi_queuecommand,
55 .eh_abort_handler = zfcp_scsi_eh_abort_handler, 52 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
56 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, 53 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
57 .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, 54 .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
58 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, 55 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
59 .can_queue = 4096, 56 .can_queue = 4096,
60 .this_id = -1, 57 .this_id = -1,
61 /*
62 * FIXME:
63 * one less? can zfcp_create_sbale cope with it?
64 */
65 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, 58 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
66 .cmd_per_lun = 1, 59 .cmd_per_lun = 1,
67 .unchecked_isa_dma = 0,
68 .use_clustering = 1, 60 .use_clustering = 1,
69 .sdev_attrs = zfcp_sysfs_sdev_attrs, 61 .sdev_attrs = zfcp_sysfs_sdev_attrs,
70 }, 62 },
71 .driver_version = ZFCP_VERSION, 63 .driver_version = ZFCP_VERSION,
72 /* rest initialised with zeros */
73}; 64};
74 65
75/* Find start of Response Information in FCP response unit*/ 66/* Find start of Response Information in FCP response unit*/
@@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp)
176 return retval; 167 return retval;
177} 168}
178 169
179static void 170/**
180zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 171 * zfcp_scsi_slave_destroy - called when scsi device is removed
172 *
 173 * Remove reference to associated scsi device for a zfcp_unit.
174 * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
175 * or a scan for this device might have failed.
176 */
177static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
181{ 178{
182 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 179 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
183 180
@@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
185 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 182 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
186 sdpnt->hostdata = NULL; 183 sdpnt->hostdata = NULL;
187 unit->device = NULL; 184 unit->device = NULL;
185 zfcp_erp_unit_failed(unit);
188 zfcp_unit_put(unit); 186 zfcp_unit_put(unit);
189 } else { 187 } else {
190 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " 188 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
@@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
549} 547}
550 548
551/** 549/**
552 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) 550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 553 */
554int 554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
556{ 555{
557 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; 556 struct zfcp_unit *unit;
558 struct zfcp_adapter *adapter = unit->port->adapter; 557 struct zfcp_adapter *adapter;
559 558 unsigned long flags;
560 ZFCP_LOG_NORMAL("bus reset because of problems with "
561 "unit 0x%016Lx\n", unit->fcp_lun);
562 zfcp_erp_adapter_reopen(adapter, 0);
563 zfcp_erp_wait(adapter);
564
565 return SUCCESS;
566}
567 559
568/** 560 unit = (struct zfcp_unit*) scpnt->device->hostdata;
569 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) 561 adapter = unit->port->adapter;
570 */
571int
572zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
573{
574 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
575 struct zfcp_adapter *adapter = unit->port->adapter;
576 562
577 ZFCP_LOG_NORMAL("host reset because of problems with " 563 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
578 "unit 0x%016Lx\n", unit->fcp_lun); 564 "unit 0x%016Lx\n", unit->fcp_lun);
579 zfcp_erp_adapter_reopen(adapter, 0); 565
580 zfcp_erp_wait(adapter); 566 write_lock_irqsave(&adapter->erp_lock, flags);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
581 582
582 return SUCCESS; 583 return SUCCESS;
583} 584}
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 5e8afc876980..2d20caf377f5 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -390,7 +390,8 @@ static struct ata_port_info piix_port_info[] = {
390 /* ich5_sata */ 390 /* ich5_sata */
391 { 391 {
392 .sht = &piix_sht, 392 .sht = &piix_sht,
393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, 393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
394 PIIX_FLAG_IGNORE_PCS,
394 .pio_mask = 0x1f, /* pio0-4 */ 395 .pio_mask = 0x1f, /* pio0-4 */
395 .mwdma_mask = 0x07, /* mwdma0-2 */ 396 .mwdma_mask = 0x07, /* mwdma0-2 */
396 .udma_mask = 0x7f, /* udma0-6 */ 397 .udma_mask = 0x7f, /* udma0-6 */
@@ -467,6 +468,11 @@ MODULE_LICENSE("GPL");
467MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 468MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
468MODULE_VERSION(DRV_VERSION); 469MODULE_VERSION(DRV_VERSION);
469 470
471static int force_pcs = 0;
472module_param(force_pcs, int, 0444);
473MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
474 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
475
470/** 476/**
471 * piix_pata_cbl_detect - Probe host controller cable detect info 477 * piix_pata_cbl_detect - Probe host controller cable detect info
472 * @ap: Port for which cable detect info is desired 478 * @ap: Port for which cable detect info is desired
@@ -531,27 +537,25 @@ static void piix_pata_error_handler(struct ata_port *ap)
531} 537}
532 538
533/** 539/**
534 * piix_sata_prereset - prereset for SATA host controller 540 * piix_sata_present_mask - determine present mask for SATA host controller
535 * @ap: Target port 541 * @ap: Target port
536 * 542 *
537 * Reads and configures SATA PCI device's PCI config register 543 * Reads SATA PCI device's PCI config register Port Configuration
538 * Port Configuration and Status (PCS) to determine port and 544 * and Status (PCS) to determine port and device availability.
539 * device availability. Return -ENODEV to skip reset if no
540 * device is present.
541 * 545 *
542 * LOCKING: 546 * LOCKING:
543 * None (inherited from caller). 547 * None (inherited from caller).
544 * 548 *
545 * RETURNS: 549 * RETURNS:
546 * 0 if device is present, -ENODEV otherwise. 550 * determined present_mask
547 */ 551 */
548static int piix_sata_prereset(struct ata_port *ap) 552static unsigned int piix_sata_present_mask(struct ata_port *ap)
549{ 553{
550 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 554 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
551 struct piix_host_priv *hpriv = ap->host_set->private_data; 555 struct piix_host_priv *hpriv = ap->host_set->private_data;
552 const unsigned int *map = hpriv->map; 556 const unsigned int *map = hpriv->map;
553 int base = 2 * ap->hard_port_no; 557 int base = 2 * ap->hard_port_no;
554 unsigned int present = 0; 558 unsigned int present_mask = 0;
555 int port, i; 559 int port, i;
556 u16 pcs; 560 u16 pcs;
557 561
@@ -564,24 +568,52 @@ static int piix_sata_prereset(struct ata_port *ap)
564 continue; 568 continue;
565 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || 569 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
566 (pcs & 1 << (hpriv->map_db->present_shift + port))) 570 (pcs & 1 << (hpriv->map_db->present_shift + port)))
567 present = 1; 571 present_mask |= 1 << i;
568 } 572 }
569 573
570 DPRINTK("ata%u: LEAVE, pcs=0x%x present=0x%x\n", 574 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
571 ap->id, pcs, present); 575 ap->id, pcs, present_mask);
572 576
573 if (!present) { 577 return present_mask;
574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); 578}
575 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 579
576 return 0; 580/**
581 * piix_sata_softreset - reset SATA host port via ATA SRST
582 * @ap: port to reset
583 * @classes: resulting classes of attached devices
584 *
585 * Reset SATA host port via ATA SRST. On controllers with
586 * reliable PCS present bits, the bits are used to determine
587 * device presence.
588 *
589 * LOCKING:
590 * Kernel thread context (may sleep)
591 *
592 * RETURNS:
593 * 0 on success, -errno otherwise.
594 */
595static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
596{
597 unsigned int present_mask;
598 int i, rc;
599
600 present_mask = piix_sata_present_mask(ap);
601
602 rc = ata_std_softreset(ap, classes);
603 if (rc)
604 return rc;
605
606 for (i = 0; i < ATA_MAX_DEVICES; i++) {
607 if (!(present_mask & (1 << i)))
608 classes[i] = ATA_DEV_NONE;
577 } 609 }
578 610
579 return ata_std_prereset(ap); 611 return 0;
580} 612}
581 613
582static void piix_sata_error_handler(struct ata_port *ap) 614static void piix_sata_error_handler(struct ata_port *ap)
583{ 615{
584 ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, 616 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
585 ata_std_postreset); 617 ata_std_postreset);
586} 618}
587 619
@@ -785,6 +817,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
785} 817}
786 818
787static void __devinit piix_init_pcs(struct pci_dev *pdev, 819static void __devinit piix_init_pcs(struct pci_dev *pdev,
820 struct ata_port_info *pinfo,
788 const struct piix_map_db *map_db) 821 const struct piix_map_db *map_db)
789{ 822{
790 u16 pcs, new_pcs; 823 u16 pcs, new_pcs;
@@ -798,6 +831,18 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs); 831 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150); 832 msleep(150);
800 } 833 }
834
835 if (force_pcs == 1) {
836 dev_printk(KERN_INFO, &pdev->dev,
837 "force ignoring PCS (0x%x)\n", new_pcs);
838 pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
839 pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
840 } else if (force_pcs == 2) {
841 dev_printk(KERN_INFO, &pdev->dev,
842 "force honoring PCS (0x%x)\n", new_pcs);
843 pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
844 pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
845 }
801} 846}
802 847
803static void __devinit piix_init_sata_map(struct pci_dev *pdev, 848static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -906,7 +951,8 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
906 if (host_flags & ATA_FLAG_SATA) { 951 if (host_flags & ATA_FLAG_SATA) {
907 piix_init_sata_map(pdev, port_info, 952 piix_init_sata_map(pdev, port_info,
908 piix_map_db_table[ent->driver_data]); 953 piix_map_db_table[ent->driver_data]);
909 piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); 954 piix_init_pcs(pdev, port_info,
955 piix_map_db_table[ent->driver_data]);
910 } 956 }
911 957
912 /* On ICH5, some BIOSen disable the interrupt using the 958 /* On ICH5, some BIOSen disable the interrupt using the
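
In the ata_piix.c hunks, piix_sata_softreset() first builds a per-device present mask from the PCS bits and then forces absent slots to "no device" after the soft reset. The sketch below shows only that masking step in isolation; the PCS bit layout, MAX_DEVICES and the enum values are made up for the demo and do not match the real register format:

#include <stdio.h>

#define MAX_DEVICES 4
enum { DEV_NONE = 0, DEV_DISK = 1 };

/* pretend bit i of 'pcs' means "device i present" (illustrative layout) */
static unsigned int present_mask_from_pcs(unsigned int pcs)
{
	unsigned int mask = 0, i;

	for (i = 0; i < MAX_DEVICES; i++)
		if (pcs & (1u << i))
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	int classes[MAX_DEVICES] = { DEV_DISK, DEV_DISK, DEV_DISK, DEV_DISK };
	unsigned int mask = present_mask_from_pcs(0x5);   /* devices 0 and 2 */
	unsigned int i;

	for (i = 0; i < MAX_DEVICES; i++)
		if (!(mask & (1u << i)))
			classes[i] = DEV_NONE;            /* not present */

	for (i = 0; i < MAX_DEVICES; i++)
		printf("device %u -> %s\n", i,
		       classes[i] == DEV_NONE ? "none" : "disk");
	return 0;
}

Masking the classes array, rather than skipping the reset entirely, lets detection still run while ignoring phantom devices on ports the PCS register reports as empty.
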
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 98bd22714d0d..5630868c1b25 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev;
1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt) 1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1147{ 1147{
1148 if (sun4_esp_physaddr) { 1148 if (sun4_esp_physaddr) {
1149 memset(&sun4_esp_dev, 0, sizeof(esp_dev)); 1149 memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; 1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
1151 sun4_esp_dev.irqs[0] = 4; 1151 sun4_esp_dev.irqs[0] = 4;
1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr; 1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
@@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1162 1162
1163static int __devexit esp_sun4_remove(void) 1163static int __devexit esp_sun4_remove(void)
1164{ 1164{
1165 struct of_device *dev = &sun4_esp_dev.ofdev;
1165 struct esp *esp = dev_get_drvdata(&dev->dev); 1166 struct esp *esp = dev_get_drvdata(&dev->dev);
1166 1167
1167 return esp_remove_common(esp); 1168 return esp_remove_common(esp);
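
The esp.c hunk replaces sizeof(esp_dev) with sizeof(sun4_esp_dev) in the memset, i.e. the classic "sizeof of the wrong object" bug. A tiny demo of why the wrong operand clears only part of the intended object follows; struct small and struct big are invented types for the illustration:

#include <stdio.h>
#include <string.h>

struct small { int a; };
struct big   { int a; char pad[60]; };

int main(void)
{
	struct big dev;
	struct small *wrong = (struct small *)&dev;

	memset(&dev, 0xff, sizeof(dev));                /* pretend it is dirty */

	/* bug pattern: size taken from a different, smaller object */
	memset(&dev, 0, sizeof(*wrong));
	printf("after wrong memset, pad[10] = 0x%02x\n",
	       (unsigned char)dev.pad[10]);             /* still 0xff */

	/* fixed: size of the object actually being cleared */
	memset(&dev, 0, sizeof(dev));
	printf("after right memset, pad[10] = 0x%02x\n",
	       (unsigned char)dev.pad[10]);             /* now 0x00 */
	return 0;
}
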
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ab2f8b267908..bcb3444f1dcf 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop";
45static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; 45static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
46static const char driver_ver[] = "v1.0 (060426)"; 46static const char driver_ver[] = "v1.0 (060426)";
47 47
48static DEFINE_SPINLOCK(hptiop_hba_list_lock);
49static LIST_HEAD(hptiop_hba_list);
50static int hptiop_cdev_major = -1;
51
52static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); 48static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
53static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); 49static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
54static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); 50static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
577 if (atomic_xchg(&hba->resetting, 1) == 0) { 573 if (atomic_xchg(&hba->resetting, 1) == 0) {
578 atomic_inc(&hba->reset_count); 574 atomic_inc(&hba->reset_count);
579 writel(IOPMU_INBOUND_MSG0_RESET, 575 writel(IOPMU_INBOUND_MSG0_RESET,
580 &hba->iop->outbound_msgaddr0); 576 &hba->iop->inbound_msgaddr0);
581 hptiop_pci_posting_flush(hba->iop); 577 hptiop_pci_posting_flush(hba->iop);
582 } 578 }
583 579
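The reset hunk now posts the reset message to the inbound (host-to-IOP) mailbox rather than the outbound one, and hptiop_pci_posting_flush() forces the posted write out before the driver starts waiting. A hedged sketch of that write-then-read-back pattern; the register block, the accessor, and the IOPMU_INBOUND_MSG0_RESET value are stand-ins here, not the real hptiop.h layout:

/* Sketch of the post-and-flush pattern used above: the reset message
 * must go to the host->IOP (inbound) mailbox, and a read-back from the
 * same device forces the posted write out of any PCI posting buffer.
 * Stand-in register block; the real layout lives in hptiop.h. */
#include <stdint.h>
#include <stdio.h>

struct iop_regs_stub {
	volatile uint32_t inbound_msgaddr0;	/* host -> IOP messages */
	volatile uint32_t outbound_msgaddr0;	/* IOP -> host messages */
	volatile uint32_t outbound_intstatus;
};

#define IOPMU_INBOUND_MSG0_RESET 0x00000001u	/* illustrative value only */

static void iop_post_reset(struct iop_regs_stub *iop)
{
	iop->inbound_msgaddr0 = IOPMU_INBOUND_MSG0_RESET;
	/* "Posting flush": read any register on the device so the write
	 * above is guaranteed to have reached it before we start waiting. */
	(void)iop->outbound_intstatus;
}

int main(void)
{
	struct iop_regs_stub regs = { 0, 0, 0 };

	iop_post_reset(&regs);
	printf("inbound_msgaddr0 = %#x\n", (unsigned)regs.inbound_msgaddr0);
	return 0;
}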
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
620 return queue_depth; 616 return queue_depth;
621} 617}
622 618
623struct hptiop_getinfo {
624 char __user *buffer;
625 loff_t buflength;
626 loff_t bufoffset;
627 loff_t buffillen;
628 loff_t filpos;
629};
630
631static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
632 char *data, int datalen)
633{
634 if (pinfo->filpos < pinfo->bufoffset) {
635 if (pinfo->filpos + datalen <= pinfo->bufoffset) {
636 pinfo->filpos += datalen;
637 return;
638 } else {
639 data += (pinfo->bufoffset - pinfo->filpos);
640 datalen -= (pinfo->bufoffset - pinfo->filpos);
641 pinfo->filpos = pinfo->bufoffset;
642 }
643 }
644
645 pinfo->filpos += datalen;
646 if (pinfo->buffillen == pinfo->buflength)
647 return;
648
649 if (pinfo->buflength - pinfo->buffillen < datalen)
650 datalen = pinfo->buflength - pinfo->buffillen;
651
652 if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
653 return;
654
655 pinfo->buffillen += datalen;
656}
657
658static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
659{
660 va_list args;
661 char buf[128];
662 int len;
663
664 va_start(args, fmt);
665 len = vsnprintf(buf, sizeof(buf), fmt, args);
666 va_end(args);
667 hptiop_copy_mem_info(pinfo, buf, len);
668 return len;
669}
670
671static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
672{
673 arg->done = NULL;
674 wake_up(&arg->hba->ioctl_wq);
675}
676
677static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
678{
679 struct hptiop_hba *hba = arg->hba;
680 u32 val;
681 struct hpt_iop_request_ioctl_command __iomem *req;
682 int ioctl_retry = 0;
683
684 dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
685
686 /*
687 * check (in + out) buff size from application.
688 * outbuf must be dword aligned.
689 */
690 if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
691 hba->max_request_size
692 - sizeof(struct hpt_iop_request_header)
693 - 4 * sizeof(u32)) {
694 dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
695 hba->host->host_no,
696 arg->inbuf_size, arg->outbuf_size);
697 arg->result = HPT_IOCTL_RESULT_FAILED;
698 return;
699 }
700
701retry:
702 spin_lock_irq(hba->host->host_lock);
703
704 val = readl(&hba->iop->inbound_queue);
705 if (val == IOPMU_QUEUE_EMPTY) {
706 spin_unlock_irq(hba->host->host_lock);
707 dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
708 arg->result = -1;
709 return;
710 }
711
712 req = (struct hpt_iop_request_ioctl_command __iomem *)
713 ((unsigned long)hba->iop + val);
714
715 writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
716 &req->ioctl_code);
717 writel(arg->inbuf_size, &req->inbuf_size);
718 writel(arg->outbuf_size, &req->outbuf_size);
719
720 /*
721 * use the buffer on the IOP local memory first, then copy it
722 * back to host.
723 * the caller's request buffer should be little-endian.
724 */
725 if (arg->inbuf_size)
726 memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
727
728 /* correct the controller ID for IOP */
729 if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
730 arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
731 arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
732 && arg->inbuf_size >= sizeof(u32))
733 writel(0, req->buf);
734
735 writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
736 writel(0, &req->header.flags);
737 writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
738 + arg->inbuf_size, &req->header.size);
739 writel((u32)(unsigned long)arg, &req->header.context);
740 writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
741 &req->header.context_hi32);
742 writel(IOP_RESULT_PENDING, &req->header.result);
743
744 arg->result = HPT_IOCTL_RESULT_FAILED;
745 arg->done = hptiop_ioctl_done;
746
747 writel(val, &hba->iop->inbound_queue);
748 hptiop_pci_posting_flush(hba->iop);
749
750 spin_unlock_irq(hba->host->host_lock);
751
752 wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
753
754 if (arg->done != NULL) {
755 hptiop_reset_hba(hba);
756 if (ioctl_retry++ < 3)
757 goto retry;
758 }
759
760 dprintk("hpt_iop_ioctl %x result %d\n",
761 arg->ioctl_code, arg->result);
762}
763
764static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
765 u32 insize, void *outbuf, u32 outsize)
766{
767 struct hpt_ioctl_k arg;
768 arg.hba = hba;
769 arg.ioctl_code = code;
770 arg.inbuf = inbuf;
771 arg.outbuf = outbuf;
772 arg.inbuf_size = insize;
773 arg.outbuf_size = outsize;
774 arg.bytes_returned = NULL;
775 hptiop_do_ioctl(&arg);
776 return arg.result;
777}
778
779static inline int hpt_id_valid(__le32 id)
780{
781 return id != 0 && id != cpu_to_le32(0xffffffff);
782}
783
784static int hptiop_get_controller_info(struct hptiop_hba *hba,
785 struct hpt_controller_info *pinfo)
786{
787 int id = 0;
788
789 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
790 &id, sizeof(int), pinfo, sizeof(*pinfo));
791}
792
793
794static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
795 struct hpt_channel_info *pinfo)
796{
797 u32 ids[2];
798
799 ids[0] = 0;
800 ids[1] = bus;
801 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
802 ids, sizeof(ids), pinfo, sizeof(*pinfo));
803
804}
805
806static int hptiop_get_logical_devices(struct hptiop_hba *hba,
807 __le32 *pids, int maxcount)
808{
809 int i;
810 u32 count = maxcount - 1;
811
812 if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
813 &count, sizeof(u32),
814 pids, sizeof(u32) * maxcount))
815 return -1;
816
817 maxcount = le32_to_cpu(pids[0]);
818 for (i = 0; i < maxcount; i++)
819 pids[i] = pids[i+1];
820
821 return maxcount;
822}
823
824static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
825 struct hpt_logical_device_info_v3 *pinfo)
826{
827 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
828 &id, sizeof(u32),
829 pinfo, sizeof(*pinfo));
830}
831
832static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
833{
834 static char s[64];
835 u32 flags = le32_to_cpu(devinfo->u.array.flags);
836 u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
837 u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
838
839 if (flags & ARRAY_FLAG_DISABLED)
840 return "Disabled";
841 else if (flags & ARRAY_FLAG_TRANSFORMING)
842 sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
843 trans_prog / 100,
844 trans_prog % 100,
845 (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
846 ", Critical" : "",
847 ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
848 !(flags & ARRAY_FLAG_REBUILDING) &&
849 !(flags & ARRAY_FLAG_INITIALIZING))?
850 ", Uninitialized" : "");
851 else if ((flags & ARRAY_FLAG_BROKEN) &&
852 devinfo->u.array.array_type != AT_RAID6)
853 return "Critical";
854 else if (flags & ARRAY_FLAG_REBUILDING)
855 sprintf(s,
856 (flags & ARRAY_FLAG_NEEDINITIALIZING)?
857 "%sBackground initializing %d.%d%%" :
858 "%sRebuilding %d.%d%%",
859 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
860 reb_prog / 100,
861 reb_prog % 100);
862 else if (flags & ARRAY_FLAG_VERIFYING)
863 sprintf(s, "%sVerifying %d.%d%%",
864 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
865 reb_prog / 100,
866 reb_prog % 100);
867 else if (flags & ARRAY_FLAG_INITIALIZING)
868 sprintf(s, "%sForeground initializing %d.%d%%",
869 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
870 reb_prog / 100,
871 reb_prog % 100);
872 else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
873 sprintf(s,"%s%s%s", "Need Expanding/Migrating",
874 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
875 ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
876 !(flags & ARRAY_FLAG_REBUILDING) &&
877 !(flags & ARRAY_FLAG_INITIALIZING))?
878 ", Uninitialized" : "");
879 else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
880 !(flags & ARRAY_FLAG_REBUILDING) &&
881 !(flags & ARRAY_FLAG_INITIALIZING))
882 sprintf(s,"%sUninitialized",
883 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
884 else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
885 (flags & ARRAY_FLAG_BROKEN))
886 return "Critical";
887 else
888 return "Normal";
889 return s;
890}
891
892static void hptiop_dump_devinfo(struct hptiop_hba *hba,
893 struct hptiop_getinfo *pinfo, __le32 id, int indent)
894{
895 struct hpt_logical_device_info_v3 devinfo;
896 int i;
897 u64 capacity;
898
899 for (i = 0; i < indent; i++)
900 hptiop_copy_info(pinfo, "\t");
901
902 if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
903 hptiop_copy_info(pinfo, "unknown\n");
904 return;
905 }
906
907 switch (devinfo.type) {
908
909 case LDT_DEVICE: {
910 struct hd_driveid *driveid;
911 u32 flags = le32_to_cpu(devinfo.u.device.flags);
912
913 driveid = (struct hd_driveid *)devinfo.u.device.ident;
914 /* model[] is 40 chars long, but we just want 20 chars here */
915 driveid->model[20] = 0;
916
917 if (indent)
918 if (flags & DEVICE_FLAG_DISABLED)
919 hptiop_copy_info(pinfo,"Missing\n");
920 else
921 hptiop_copy_info(pinfo, "CH%d %s\n",
922 devinfo.u.device.path_id + 1,
923 driveid->model);
924 else {
925 capacity = le64_to_cpu(devinfo.capacity) * 512;
926 do_div(capacity, 1000000);
927 hptiop_copy_info(pinfo,
928 "CH%d %s, %lluMB, %s %s%s%s%s\n",
929 devinfo.u.device.path_id + 1,
930 driveid->model,
931 capacity,
932 (flags & DEVICE_FLAG_DISABLED)?
933 "Disabled" : "Normal",
934 devinfo.u.device.read_ahead_enabled?
935 "[RA]" : "",
936 devinfo.u.device.write_cache_enabled?
937 "[WC]" : "",
938 devinfo.u.device.TCQ_enabled?
939 "[TCQ]" : "",
940 devinfo.u.device.NCQ_enabled?
941 "[NCQ]" : ""
942 );
943 }
944 break;
945 }
946
947 case LDT_ARRAY:
948 if (devinfo.target_id != INVALID_TARGET_ID)
949 hptiop_copy_info(pinfo, "[DISK %d_%d] ",
950 devinfo.vbus_id, devinfo.target_id);
951
952 capacity = le64_to_cpu(devinfo.capacity) * 512;
953 do_div(capacity, 1000000);
954 hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
955 devinfo.u.array.name,
956 devinfo.u.array.array_type==AT_RAID0? "RAID0" :
957 devinfo.u.array.array_type==AT_RAID1? "RAID1" :
958 devinfo.u.array.array_type==AT_RAID5? "RAID5" :
959 devinfo.u.array.array_type==AT_RAID6? "RAID6" :
960 devinfo.u.array.array_type==AT_JBOD? "JBOD" :
961 "unknown",
962 capacity,
963 get_array_status(&devinfo));
964 for (i = 0; i < devinfo.u.array.ndisk; i++) {
965 if (hpt_id_valid(devinfo.u.array.members[i])) {
966 if (cpu_to_le16(1<<i) &
967 devinfo.u.array.critical_members)
968 hptiop_copy_info(pinfo, "\t*");
969 hptiop_dump_devinfo(hba, pinfo,
970 devinfo.u.array.members[i], indent+1);
971 }
972 else
973 hptiop_copy_info(pinfo, "\tMissing\n");
974 }
975 if (id == devinfo.u.array.transform_source) {
976 hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
977 hptiop_dump_devinfo(hba, pinfo,
978 devinfo.u.array.transform_target, indent+1);
979 }
980 break;
981 }
982}
983
984static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) 619static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
985{ 620{
986 return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); 621 return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
987} 622}
988 623
989static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
990 size_t count, loff_t *ppos)
991{
992 struct hptiop_hba *hba = filp->private_data;
993 struct hptiop_getinfo info;
994 int i, j, ndev;
995 struct hpt_controller_info con_info;
996 struct hpt_channel_info chan_info;
997 __le32 ids[32];
998
999 info.buffer = buf;
1000 info.buflength = count;
1001 info.bufoffset = ppos ? *ppos : 0;
1002 info.filpos = 0;
1003 info.buffillen = 0;
1004
1005 if (hptiop_get_controller_info(hba, &con_info))
1006 return -EIO;
1007
1008 for (i = 0; i < con_info.num_buses; i++) {
1009 if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
1010 if (hpt_id_valid(chan_info.devices[0]))
1011 hptiop_dump_devinfo(hba, &info,
1012 chan_info.devices[0], 0);
1013 if (hpt_id_valid(chan_info.devices[1]))
1014 hptiop_dump_devinfo(hba, &info,
1015 chan_info.devices[1], 0);
1016 }
1017 }
1018
1019 ndev = hptiop_get_logical_devices(hba, ids,
1020 sizeof(ids) / sizeof(ids[0]));
1021
1022 /*
1023 * if hptiop_get_logical_devices fails, ndev==-1 and it just
1024 * outputs nothing here
1025 */
1026 for (j = 0; j < ndev; j++)
1027 hptiop_dump_devinfo(hba, &info, ids[j], 0);
1028
1029 if (ppos)
1030 *ppos += info.buffillen;
1031
1032 return info.buffillen;
1033}
1034
1035static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
1036 unsigned int cmd, unsigned long arg)
1037{
1038 struct hptiop_hba *hba = file->private_data;
1039 struct hpt_ioctl_u ioctl_u;
1040 struct hpt_ioctl_k ioctl_k;
1041 u32 bytes_returned;
1042 int err = -EINVAL;
1043
1044 if (copy_from_user(&ioctl_u,
1045 (void __user *)arg, sizeof(struct hpt_ioctl_u)))
1046 return -EINVAL;
1047
1048 if (ioctl_u.magic != HPT_IOCTL_MAGIC)
1049 return -EINVAL;
1050
1051 ioctl_k.ioctl_code = ioctl_u.ioctl_code;
1052 ioctl_k.inbuf = NULL;
1053 ioctl_k.inbuf_size = ioctl_u.inbuf_size;
1054 ioctl_k.outbuf = NULL;
1055 ioctl_k.outbuf_size = ioctl_u.outbuf_size;
1056 ioctl_k.hba = hba;
1057 ioctl_k.bytes_returned = &bytes_returned;
1058
1059 /* verify user buffer */
1060 if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
1061 ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
1062 (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
1063 ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
1064 (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
1065 ioctl_u.bytes_returned, sizeof(u32))) ||
1066 ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
1067
1068 dprintk("scsi%d: got bad user address\n", hba->host->host_no);
1069 return -EINVAL;
1070 }
1071
1072 /* map buffer to kernel. */
1073 if (ioctl_k.inbuf_size) {
1074 ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
1075 if (!ioctl_k.inbuf) {
1076 dprintk("scsi%d: fail to alloc inbuf\n",
1077 hba->host->host_no);
1078 err = -ENOMEM;
1079 goto err_exit;
1080 }
1081
1082 if (copy_from_user(ioctl_k.inbuf,
1083 ioctl_u.inbuf, ioctl_k.inbuf_size)) {
1084 goto err_exit;
1085 }
1086 }
1087
1088 if (ioctl_k.outbuf_size) {
1089 ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
1090 if (!ioctl_k.outbuf) {
1091 dprintk("scsi%d: fail to alloc outbuf\n",
1092 hba->host->host_no);
1093 err = -ENOMEM;
1094 goto err_exit;
1095 }
1096 }
1097
1098 hptiop_do_ioctl(&ioctl_k);
1099
1100 if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
1101 if (ioctl_k.outbuf_size &&
1102 copy_to_user(ioctl_u.outbuf,
1103 ioctl_k.outbuf, ioctl_k.outbuf_size))
1104 goto err_exit;
1105
1106 if (ioctl_u.bytes_returned &&
1107 copy_to_user(ioctl_u.bytes_returned,
1108 &bytes_returned, sizeof(u32)))
1109 goto err_exit;
1110
1111 err = 0;
1112 }
1113
1114err_exit:
1115 kfree(ioctl_k.inbuf);
1116 kfree(ioctl_k.outbuf);
1117
1118 return err;
1119}
1120
1121static int hptiop_cdev_open(struct inode *inode, struct file *file)
1122{
1123 struct hptiop_hba *hba;
1124 unsigned i = 0, minor = iminor(inode);
1125 int ret = -ENODEV;
1126
1127 spin_lock(&hptiop_hba_list_lock);
1128 list_for_each_entry(hba, &hptiop_hba_list, link) {
1129 if (i == minor) {
1130 file->private_data = hba;
1131 ret = 0;
1132 goto out;
1133 }
1134 i++;
1135 }
1136
1137out:
1138 spin_unlock(&hptiop_hba_list_lock);
1139 return ret;
1140}
1141
1142static struct file_operations hptiop_cdev_fops = {
1143 .owner = THIS_MODULE,
1144 .read = hptiop_cdev_read,
1145 .ioctl = hptiop_cdev_ioctl,
1146 .open = hptiop_cdev_open,
1147};
1148
1149static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) 624static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
1150{ 625{
1151 struct Scsi_Host *host = class_to_shost(class_dev); 626 struct Scsi_Host *host = class_to_shost(class_dev);
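The block removed above implemented a character-device management path; its core helper, hptiop_copy_mem_info(), streams generated text and keeps only the slice that falls inside the caller's (bufoffset, buflength) window. A userspace analogue of that windowed copy, with memcpy() standing in for copy_to_user():

/* Userspace analogue of the hptiop_copy_mem_info() window copy removed
 * above: data is generated sequentially, but only the slice that falls
 * inside [bufoffset, bufoffset + buflength) is kept.  memcpy() stands
 * in for copy_to_user(); the bookkeeping mirrors the deleted helper. */
#include <stdio.h>
#include <string.h>

struct getinfo {
	char  *buffer;		/* destination window */
	long   buflength;	/* window size */
	long   bufoffset;	/* stream offset where the window starts */
	long   buffillen;	/* bytes stored so far */
	long   filpos;		/* current position in the virtual stream */
};

static void copy_mem_info(struct getinfo *p, const char *data, long datalen)
{
	if (p->filpos < p->bufoffset) {		/* still before the window */
		if (p->filpos + datalen <= p->bufoffset) {
			p->filpos += datalen;
			return;
		}
		data    += p->bufoffset - p->filpos;
		datalen -= p->bufoffset - p->filpos;
		p->filpos = p->bufoffset;
	}

	p->filpos += datalen;
	if (p->buffillen == p->buflength)	/* window already full */
		return;
	if (p->buflength - p->buffillen < datalen)
		datalen = p->buflength - p->buffillen;

	memcpy(p->buffer + p->buffillen, data, datalen);
	p->buffillen += datalen;
}

int main(void)
{
	char out[8];
	struct getinfo info = { out, sizeof(out), 4, 0, 0 };

	copy_mem_info(&info, "0123456789", 10);	/* skips the first 4 bytes */
	copy_mem_info(&info, "abcdef", 6);	/* truncated to fit the window */
	printf("%.*s\n", (int)info.buffillen, out);	/* prints "456789ab" */
	return 0;
}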
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
1296 goto unmap_pci_bar; 771 goto unmap_pci_bar;
1297 } 772 }
1298 773
1299 if (scsi_add_host(host, &pcidev->dev)) {
1300 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1301 hba->host->host_no);
1302 goto unmap_pci_bar;
1303 }
1304
1305 pci_set_drvdata(pcidev, host); 774 pci_set_drvdata(pcidev, host);
1306 775
1307 if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, 776 if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
1308 driver_name, hba)) { 777 driver_name, hba)) {
1309 printk(KERN_ERR "scsi%d: request irq %d failed\n", 778 printk(KERN_ERR "scsi%d: request irq %d failed\n",
1310 hba->host->host_no, pcidev->irq); 779 hba->host->host_no, pcidev->irq);
1311 goto remove_scsi_host; 780 goto unmap_pci_bar;
1312 } 781 }
1313 782
1314 /* Allocate request mem */ 783 /* Allocate request mem */
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
1355 if (hptiop_initialize_iop(hba)) 824 if (hptiop_initialize_iop(hba))
1356 goto free_request_mem; 825 goto free_request_mem;
1357 826
1358 spin_lock(&hptiop_hba_list_lock); 827 if (scsi_add_host(host, &pcidev->dev)) {
1359 list_add_tail(&hba->link, &hptiop_hba_list); 828 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1360 spin_unlock(&hptiop_hba_list_lock); 829 hba->host->host_no);
830 goto free_request_mem;
831 }
832
1361 833
1362 scsi_scan_host(host); 834 scsi_scan_host(host);
1363 835
@@ -1372,9 +844,6 @@ free_request_mem:
1372free_request_irq: 844free_request_irq:
1373 free_irq(hba->pcidev->irq, hba); 845 free_irq(hba->pcidev->irq, hba);
1374 846
1375remove_scsi_host:
1376 scsi_remove_host(host);
1377
1378unmap_pci_bar: 847unmap_pci_bar:
1379 iounmap(hba->iop); 848 iounmap(hba->iop);
1380 849
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
1422 891
1423 scsi_remove_host(host); 892 scsi_remove_host(host);
1424 893
1425 spin_lock(&hptiop_hba_list_lock);
1426 list_del_init(&hba->link);
1427 spin_unlock(&hptiop_hba_list_lock);
1428
1429 hptiop_shutdown(pcidev); 894 hptiop_shutdown(pcidev);
1430 895
1431 free_irq(hba->pcidev->irq, hba); 896 free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = {
1462 927
1463static int __init hptiop_module_init(void) 928static int __init hptiop_module_init(void)
1464{ 929{
1465 int error;
1466
1467 printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); 930 printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
1468 931 return pci_register_driver(&hptiop_pci_driver);
1469 error = pci_register_driver(&hptiop_pci_driver);
1470 if (error < 0)
1471 return error;
1472
1473 hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
1474 if (hptiop_cdev_major < 0) {
1475 printk(KERN_WARNING "unable to register hptiop device.\n");
1476 return hptiop_cdev_major;
1477 }
1478
1479 return 0;
1480} 932}
1481 933
1482static void __exit hptiop_module_exit(void) 934static void __exit hptiop_module_exit(void)
1483{ 935{
1484 dprintk("hptiop_module_exit\n");
1485 unregister_chrdev(hptiop_cdev_major, "hptiop");
1486 pci_unregister_driver(&hptiop_pci_driver); 936 pci_unregister_driver(&hptiop_pci_driver);
1487} 937}
1488 938
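The remaining hptiop hunks reorder probe so the SCSI host is registered only after the IRQ is requested and the IOP is initialized, which lets the error path unwind in strict reverse order and drop the remove_scsi_host label; it also avoids exposing the host to the midlayer before the controller can service commands. A rough skeleton of that shape, with stub step functions rather than the driver's real calls:

/* Rough skeleton of the probe ordering the patch moves to: bring the
 * hardware fully up first, register with the SCSI midlayer last, and
 * unwind in exact reverse order on failure.  The step functions are
 * stubs standing in for ioremap/request_irq/IOP setup/scsi_add_host. */
#include <stdio.h>

static int map_registers(void)     { return 0; }
static int request_interrupt(void) { return 0; }
static int init_controller(void)   { return 0; }
static int register_host(void)     { return 0; }

static void teardown_controller(void) { }
static void free_interrupt(void)      { }
static void unmap_registers(void)     { }

static int probe(void)
{
	if (map_registers())
		goto out;
	if (request_interrupt())
		goto unmap;
	if (init_controller())
		goto free_irq;
	if (register_host())	/* the failed step itself needs no undo */
		goto teardown;
	return 0;

teardown:
	teardown_controller();
free_irq:
	free_interrupt();
unmap:
	unmap_registers();
out:
	return -1;
}

int main(void)
{
	printf("probe -> %d\n", probe());
	return 0;
}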
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f7b5d7372d26..94d1de55607f 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
517 /* No more interrupts */ 517 /* No more interrupts */
518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); 519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
520 local_irq_enable(); 520 local_irq_enable_in_hardirq();
521 if (status.b.check) 521 if (status.b.check)
522 rq->errors++; 522 rq->errors++;
523 idescsi_end_request (drive, 1, 0); 523 idescsi_end_request (drive, 1, 0);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 848fb2aa4ca3..058f094f945a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -43,13 +43,10 @@
43 43
44#include "iscsi_tcp.h" 44#include "iscsi_tcp.h"
45 45
46#define ISCSI_TCP_VERSION "1.0-595"
47
48MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " 46MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>"); 47 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path"); 48MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
52MODULE_VERSION(ISCSI_TCP_VERSION);
53/* #define DEBUG_TCP */ 50/* #define DEBUG_TCP */
54#define DEBUG_ASSERT 51#define DEBUG_ASSERT
55 52
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
185 * must be called with session lock 182 * must be called with session lock
186 */ 183 */
187static void 184static void
188__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 185iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
189{ 186{
190 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 187 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
188 struct iscsi_r2t_info *r2t;
191 struct scsi_cmnd *sc; 189 struct scsi_cmnd *sc;
192 190
191 /* flush ctask's r2t queues */
192 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
193 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
194 sizeof(void*));
195 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
196 }
197
193 sc = ctask->sc; 198 sc = ctask->sc;
194 if (unlikely(!sc)) 199 if (unlikely(!sc))
195 return; 200 return;
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
374 spin_unlock(&session->lock); 379 spin_unlock(&session->lock);
375 return 0; 380 return 0;
376 } 381 }
382
377 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 383 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
378 BUG_ON(!rc); 384 BUG_ON(!rc);
379 385
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
399 tcp_ctask->exp_r2tsn = r2tsn + 1; 405 tcp_ctask->exp_r2tsn = r2tsn + 1;
400 tcp_ctask->xmstate |= XMSTATE_SOL_HDR; 406 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
401 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 407 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
402 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); 408 list_move_tail(&ctask->running, &conn->xmitqueue);
403 409
404 scsi_queue_work(session->host, &conn->xmitwork); 410 scsi_queue_work(session->host, &conn->xmitwork);
405 conn->r2t_pdus_cnt++; 411 conn->r2t_pdus_cnt++;
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
477 case ISCSI_OP_SCSI_DATA_IN: 483 case ISCSI_OP_SCSI_DATA_IN:
478 tcp_conn->in.ctask = session->cmds[itt]; 484 tcp_conn->in.ctask = session->cmds[itt];
479 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); 485 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
486 if (rc)
487 return rc;
480 /* fall through */ 488 /* fall through */
481 case ISCSI_OP_SCSI_CMD_RSP: 489 case ISCSI_OP_SCSI_CMD_RSP:
482 tcp_conn->in.ctask = session->cmds[itt]; 490 tcp_conn->in.ctask = session->cmds[itt];
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
484 goto copy_hdr; 492 goto copy_hdr;
485 493
486 spin_lock(&session->lock); 494 spin_lock(&session->lock);
487 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); 495 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
488 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 496 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
489 spin_unlock(&session->lock); 497 spin_unlock(&session->lock);
490 break; 498 break;
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
500 break; 508 break;
501 case ISCSI_OP_LOGIN_RSP: 509 case ISCSI_OP_LOGIN_RSP:
502 case ISCSI_OP_TEXT_RSP: 510 case ISCSI_OP_TEXT_RSP:
503 case ISCSI_OP_LOGOUT_RSP:
504 case ISCSI_OP_NOOP_IN:
505 case ISCSI_OP_REJECT: 511 case ISCSI_OP_REJECT:
506 case ISCSI_OP_ASYNC_EVENT: 512 case ISCSI_OP_ASYNC_EVENT:
513 /*
514 * It is possible that we could get a PDU with a buffer larger
515 * than 8K, but there are no targets that currently do this.
516 * For now we fail until we find a vendor that needs it
517 */
518 if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
519 tcp_conn->in.datalen) {
520 printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
521 "but conn buffer is only %u (opcode %0x)\n",
522 tcp_conn->in.datalen,
523 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
524 rc = ISCSI_ERR_PROTO;
525 break;
526 }
527
507 if (tcp_conn->in.datalen) 528 if (tcp_conn->in.datalen)
508 goto copy_hdr; 529 goto copy_hdr;
509 /* fall through */ 530 /* fall through */
531 case ISCSI_OP_LOGOUT_RSP:
532 case ISCSI_OP_NOOP_IN:
510 case ISCSI_OP_SCSI_TMFUNC_RSP: 533 case ISCSI_OP_SCSI_TMFUNC_RSP:
511 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 534 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
512 break; 535 break;
@@ -523,7 +546,7 @@ copy_hdr:
523 * skbs to complete the command then we have to copy the header 546 * skbs to complete the command then we have to copy the header
524 * for later use 547 * for later use
525 */ 548 */
526 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < 549 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
527 (tcp_conn->in.datalen + tcp_conn->in.padding + 550 (tcp_conn->in.datalen + tcp_conn->in.padding +
528 (conn->datadgst_en ? 4 : 0))) { 551 (conn->datadgst_en ? 4 : 0))) {
529 debug_tcp("Copying header for later use. in.copy %d in.datalen" 552 debug_tcp("Copying header for later use. in.copy %d in.datalen"
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
614 * byte counters. 637 * byte counters.
615 **/ 638 **/
616static inline int 639static inline int
617iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) 640iscsi_tcp_copy(struct iscsi_conn *conn)
618{ 641{
619 void *buf = tcp_conn->data; 642 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
620 int buf_size = tcp_conn->in.datalen; 643 int buf_size = tcp_conn->in.datalen;
621 int buf_left = buf_size - tcp_conn->data_copied; 644 int buf_left = buf_size - tcp_conn->data_copied;
622 int size = min(tcp_conn->in.copy, buf_left); 645 int size = min(tcp_conn->in.copy, buf_left);
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
627 BUG_ON(size <= 0); 650 BUG_ON(size <= 0);
628 651
629 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 652 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
630 (char*)buf + tcp_conn->data_copied, size); 653 (char*)conn->data + tcp_conn->data_copied, size);
631 BUG_ON(rc); 654 BUG_ON(rc);
632 655
633 tcp_conn->in.offset += size; 656 tcp_conn->in.offset += size;
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
745done: 768done:
746 /* check for non-exceptional status */ 769 /* check for non-exceptional status */
747 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { 770 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
748 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 771 debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
749 (long)sc, sc->result, ctask->itt); 772 (long)sc, sc->result, ctask->itt,
773 tcp_conn->in.hdr->flags);
750 spin_lock(&conn->session->lock); 774 spin_lock(&conn->session->lock);
751 __iscsi_ctask_cleanup(conn, ctask); 775 iscsi_tcp_cleanup_ctask(conn, ctask);
752 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); 776 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
753 spin_unlock(&conn->session->lock); 777 spin_unlock(&conn->session->lock);
754 } 778 }
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn)
769 break; 793 break;
770 case ISCSI_OP_SCSI_CMD_RSP: 794 case ISCSI_OP_SCSI_CMD_RSP:
771 spin_lock(&conn->session->lock); 795 spin_lock(&conn->session->lock);
772 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); 796 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
773 spin_unlock(&conn->session->lock); 797 spin_unlock(&conn->session->lock);
774 case ISCSI_OP_TEXT_RSP: 798 case ISCSI_OP_TEXT_RSP:
775 case ISCSI_OP_LOGIN_RSP: 799 case ISCSI_OP_LOGIN_RSP:
776 case ISCSI_OP_NOOP_IN:
777 case ISCSI_OP_ASYNC_EVENT: 800 case ISCSI_OP_ASYNC_EVENT:
778 case ISCSI_OP_REJECT: 801 case ISCSI_OP_REJECT:
779 /* 802 /*
780 * Collect data segment to the connection's data 803 * Collect data segment to the connection's data
781 * placeholder 804 * placeholder
782 */ 805 */
783 if (iscsi_tcp_copy(tcp_conn)) { 806 if (iscsi_tcp_copy(conn)) {
784 rc = -EAGAIN; 807 rc = -EAGAIN;
785 goto exit; 808 goto exit;
786 } 809 }
787 810
788 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, 811 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
789 tcp_conn->in.datalen); 812 tcp_conn->in.datalen);
790 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) 813 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
791 iscsi_recv_digest_update(tcp_conn, tcp_conn->data, 814 iscsi_recv_digest_update(tcp_conn, conn->data,
792 tcp_conn->in.datalen); 815 tcp_conn->in.datalen);
793 break; 816 break;
794 default: 817 default:
@@ -843,7 +866,7 @@ more:
843 if (rc == -EAGAIN) 866 if (rc == -EAGAIN)
844 goto nomore; 867 goto nomore;
845 else { 868 else {
846 iscsi_conn_failure(conn, rc); 869 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
847 return 0; 870 return 0;
848 } 871 }
849 } 872 }
@@ -897,7 +920,7 @@ more:
897 if (rc) { 920 if (rc) {
898 if (rc == -EAGAIN) 921 if (rc == -EAGAIN)
899 goto again; 922 goto again;
900 iscsi_conn_failure(conn, rc); 923 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
901 return 0; 924 return 0;
902 } 925 }
903 tcp_conn->in.copy -= tcp_conn->in.padding; 926 tcp_conn->in.copy -= tcp_conn->in.padding;
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1028} 1051}
1029 1052
1030static void 1053static void
1031iscsi_conn_restore_callbacks(struct iscsi_conn *conn) 1054iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
1032{ 1055{
1033 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1034 struct sock *sk = tcp_conn->sock->sk; 1056 struct sock *sk = tcp_conn->sock->sk;
1035 1057
1036 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 1058 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1308 ctask->imm_count - 1330 ctask->imm_count -
1309 ctask->unsol_count; 1331 ctask->unsol_count;
1310 1332
1311 debug_scsi("cmd [itt %x total %d imm %d imm_data %d " 1333 debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
1312 "r2t_data %d]\n", 1334 "r2t_data %d]\n",
1313 ctask->itt, ctask->total_length, ctask->imm_count, 1335 ctask->itt, ctask->total_length, ctask->imm_count,
1314 ctask->unsol_count, tcp_ctask->r2t_data_count); 1336 ctask->unsol_count, tcp_ctask->r2t_data_count);
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1636 } 1658 }
1637solicit_again: 1659solicit_again:
1638 /* 1660 /*
1639 * send Data-Out whitnin this R2T sequence. 1661 * send Data-Out within this R2T sequence.
1640 */ 1662 */
1641 if (!r2t->data_count) 1663 if (!r2t->data_count)
1642 goto data_out_done; 1664 goto data_out_done;
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1731 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1753 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1732 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1754 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1733 struct iscsi_data_task *dtask = tcp_ctask->dtask; 1755 struct iscsi_data_task *dtask = tcp_ctask->dtask;
1734 int sent, rc; 1756 int sent = 0, rc;
1735 1757
1736 tcp_ctask->xmstate &= ~XMSTATE_W_PAD; 1758 tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
1737 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, 1759 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1900 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 1922 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1901 /* initial operational parameters */ 1923 /* initial operational parameters */
1902 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1924 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1903 tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
1904
1905 /* allocate initial PDU receive place holder */
1906 if (tcp_conn->data_size <= PAGE_SIZE)
1907 tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL);
1908 else
1909 tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL,
1910 get_order(tcp_conn->data_size));
1911 if (!tcp_conn->data)
1912 goto max_recv_dlenght_alloc_fail;
1913 1925
1914 return cls_conn; 1926 return cls_conn;
1915 1927
1916max_recv_dlenght_alloc_fail:
1917 kfree(tcp_conn);
1918tcp_conn_alloc_fail: 1928tcp_conn_alloc_fail:
1919 iscsi_conn_teardown(cls_conn); 1929 iscsi_conn_teardown(cls_conn);
1920 return NULL; 1930 return NULL;
1921} 1931}
1922 1932
1923static void 1933static void
1934iscsi_tcp_release_conn(struct iscsi_conn *conn)
1935{
1936 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1937
1938 if (!tcp_conn->sock)
1939 return;
1940
1941 sock_hold(tcp_conn->sock->sk);
1942 iscsi_conn_restore_callbacks(tcp_conn);
1943 sock_put(tcp_conn->sock->sk);
1944
1945 sock_release(tcp_conn->sock);
1946 tcp_conn->sock = NULL;
1947 conn->recv_lock = NULL;
1948}
1949
1950static void
1924iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) 1951iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1925{ 1952{
1926 struct iscsi_conn *conn = cls_conn->dd_data; 1953 struct iscsi_conn *conn = cls_conn->dd_data;
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1930 if (conn->hdrdgst_en || conn->datadgst_en) 1957 if (conn->hdrdgst_en || conn->datadgst_en)
1931 digest = 1; 1958 digest = 1;
1932 1959
1960 iscsi_tcp_release_conn(conn);
1933 iscsi_conn_teardown(cls_conn); 1961 iscsi_conn_teardown(cls_conn);
1934 1962
1935 /* now free tcp_conn */ 1963 /* now free tcp_conn */
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1944 crypto_free_tfm(tcp_conn->data_rx_tfm); 1972 crypto_free_tfm(tcp_conn->data_rx_tfm);
1945 } 1973 }
1946 1974
1947 /* free conn->data, size = MaxRecvDataSegmentLength */
1948 if (tcp_conn->data_size <= PAGE_SIZE)
1949 kfree(tcp_conn->data);
1950 else
1951 free_pages((unsigned long)tcp_conn->data,
1952 get_order(tcp_conn->data_size));
1953 kfree(tcp_conn); 1975 kfree(tcp_conn);
1954} 1976}
1955 1977
1978static void
1979iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1980{
1981 struct iscsi_conn *conn = cls_conn->dd_data;
1982
1983 iscsi_conn_stop(cls_conn, flag);
1984 iscsi_tcp_release_conn(conn);
1985}
1986
1956static int 1987static int
1957iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 1988iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1958 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1989 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
2001 return 0; 2032 return 0;
2002} 2033}
2003 2034
2004static void
2005iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2006{
2007 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2008 struct iscsi_r2t_info *r2t;
2009
2010 /* flush ctask's r2t queues */
2011 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)))
2012 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
2013 sizeof(void*));
2014
2015 __iscsi_ctask_cleanup(conn, ctask);
2016}
2017
2018static void
2019iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn)
2020{
2021 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2022 struct sock *sk;
2023
2024 if (!tcp_conn->sock)
2025 return;
2026
2027 sk = tcp_conn->sock->sk;
2028 write_lock_bh(&sk->sk_callback_lock);
2029 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2030 write_unlock_bh(&sk->sk_callback_lock);
2031}
2032
2033static void
2034iscsi_tcp_terminate_conn(struct iscsi_conn *conn)
2035{
2036 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2037
2038 if (!tcp_conn->sock)
2039 return;
2040
2041 sock_hold(tcp_conn->sock->sk);
2042 iscsi_conn_restore_callbacks(conn);
2043 sock_put(tcp_conn->sock->sk);
2044
2045 sock_release(tcp_conn->sock);
2046 tcp_conn->sock = NULL;
2047 conn->recv_lock = NULL;
2048}
2049
2050/* called with host lock */ 2035/* called with host lock */
2051static void 2036static void
2052iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, 2037iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
2057 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, 2042 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
2058 sizeof(struct iscsi_hdr)); 2043 sizeof(struct iscsi_hdr));
2059 tcp_mtask->xmstate = XMSTATE_IMM_HDR; 2044 tcp_mtask->xmstate = XMSTATE_IMM_HDR;
2045 tcp_mtask->sent = 0;
2060 2046
2061 if (mtask->data_count) 2047 if (mtask->data_count)
2062 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, 2048 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2138 int value; 2124 int value;
2139 2125
2140 switch(param) { 2126 switch(param) {
2141 case ISCSI_PARAM_MAX_RECV_DLENGTH: {
2142 char *saveptr = tcp_conn->data;
2143 gfp_t flags = GFP_KERNEL;
2144
2145 sscanf(buf, "%d", &value);
2146 if (tcp_conn->data_size >= value) {
2147 iscsi_set_param(cls_conn, param, buf, buflen);
2148 break;
2149 }
2150
2151 spin_lock_bh(&session->lock);
2152 if (conn->stop_stage == STOP_CONN_RECOVER)
2153 flags = GFP_ATOMIC;
2154 spin_unlock_bh(&session->lock);
2155
2156 if (value <= PAGE_SIZE)
2157 tcp_conn->data = kmalloc(value, flags);
2158 else
2159 tcp_conn->data = (void*)__get_free_pages(flags,
2160 get_order(value));
2161 if (tcp_conn->data == NULL) {
2162 tcp_conn->data = saveptr;
2163 return -ENOMEM;
2164 }
2165 if (tcp_conn->data_size <= PAGE_SIZE)
2166 kfree(saveptr);
2167 else
2168 free_pages((unsigned long)saveptr,
2169 get_order(tcp_conn->data_size));
2170 iscsi_set_param(cls_conn, param, buf, buflen);
2171 tcp_conn->data_size = value;
2172 break;
2173 }
2174 case ISCSI_PARAM_HDRDGST_EN: 2127 case ISCSI_PARAM_HDRDGST_EN:
2175 iscsi_set_param(cls_conn, param, buf, buflen); 2128 iscsi_set_param(cls_conn, param, buf, buflen);
2176 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 2129 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
2361} 2314}
2362 2315
2363static struct scsi_host_template iscsi_sht = { 2316static struct scsi_host_template iscsi_sht = {
2364 .name = "iSCSI Initiator over TCP/IP, v" 2317 .name = "iSCSI Initiator over TCP/IP",
2365 ISCSI_TCP_VERSION,
2366 .queuecommand = iscsi_queuecommand, 2318 .queuecommand = iscsi_queuecommand,
2367 .change_queue_depth = iscsi_change_queue_depth, 2319 .change_queue_depth = iscsi_change_queue_depth,
2368 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2320 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
2414 .get_conn_param = iscsi_tcp_conn_get_param, 2366 .get_conn_param = iscsi_tcp_conn_get_param,
2415 .get_session_param = iscsi_session_get_param, 2367 .get_session_param = iscsi_session_get_param,
2416 .start_conn = iscsi_conn_start, 2368 .start_conn = iscsi_conn_start,
2417 .stop_conn = iscsi_conn_stop, 2369 .stop_conn = iscsi_tcp_conn_stop,
2418 /* these are called as part of conn recovery */
2419 .suspend_conn_recv = iscsi_tcp_suspend_conn_rx,
2420 .terminate_conn = iscsi_tcp_terminate_conn,
2421 /* IO */ 2370 /* IO */
2422 .send_pdu = iscsi_conn_send_pdu, 2371 .send_pdu = iscsi_conn_send_pdu,
2423 .get_stats = iscsi_conn_get_stats, 2372 .get_stats = iscsi_conn_get_stats,
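Two themes run through the iscsi_tcp changes: task cleanup now drains any R2Ts still queued for the command back into the free pool, and socket teardown is consolidated into iscsi_tcp_release_conn(), shared by conn_stop and conn_destroy. A userspace analogue of the drain loop, with a tiny ring buffer standing in for the kernel kfifo:

/* Userspace analogue of the R2T drain in iscsi_tcp_cleanup_ctask():
 * anything still sitting on the task's pending queue is pushed back to
 * the free pool so it can be reused.  A tiny ring buffer stands in for
 * the kernel kfifo; only the drain loop mirrors the driver. */
#include <stdio.h>

#define QSZ 8

struct fifo { void *slot[QSZ]; int head, tail; };

static int fifo_put(struct fifo *f, void *p)
{
	if (f->tail - f->head == QSZ)
		return 0;			/* full */
	f->slot[f->tail++ % QSZ] = p;
	return 1;
}

static int fifo_get(struct fifo *f, void **p)
{
	if (f->tail == f->head)
		return 0;			/* empty */
	*p = f->slot[f->head++ % QSZ];
	return 1;
}

struct r2t { int id; };

int main(void)
{
	struct fifo pool = { { 0 }, 0, 0 }, pending = { { 0 }, 0, 0 };
	struct r2t r2ts[3] = { { 1 }, { 2 }, { 3 } };
	void *r2t;
	int i;

	for (i = 0; i < 3; i++)			/* R2Ts handed to the task */
		fifo_put(&pending, &r2ts[i]);

	/* Cleanup: flush the task's pending R2Ts back into the pool. */
	while (fifo_get(&pending, &r2t))
		fifo_put(&pool, r2t);

	while (fifo_get(&pool, &r2t))
		printf("r2t %d back in pool\n", ((struct r2t *)r2t)->id);
	return 0;
}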
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 808302832e68..6a4ee704e46e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn {
78 char hdrext[4*sizeof(__u16) + 78 char hdrext[4*sizeof(__u16) +
79 sizeof(__u32)]; 79 sizeof(__u32)];
80 int data_copied; 80 int data_copied;
81 char *data; /* data placeholder */
82 int data_size; /* actual recv_dlength */
83 int stop_stage; /* conn_stop() flag: * 81 int stop_stage; /* conn_stop() flag: *
84 * stop to recover, * 82 * stop to recover, *
85 * stop to terminate */ 83 * stop to terminate */
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 16fc2dd8f2f7..73dd6c8deede 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2746,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2746 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 2746 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2747 return rc; 2747 return rc;
2748 2748
2749 scontrol = (scontrol & 0x0f0) | 0x302; 2749 scontrol = (scontrol & 0x0f0) | 0x304;
2750 2750
2751 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 2751 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2752 return rc; 2752 return rc;
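The libata change rewrites the low SControl bits while the & 0x0f0 mask keeps the SPD field intact. Going by the SATA spec layout (DET in bits 3:0, SPD in 7:4, IPM in 11:8), 0x302 -> 0x304 leaves power-management transitions disabled (IPM=3) and changes only the requested DET code. A small decoder, purely illustrative and not part of the driver:

/* Decode the SControl values touched in sata_std_hardreset() above.
 * Field layout (DET 3:0, SPD 7:4, IPM 11:8) follows the SATA spec;
 * this only splits the register, it does not model the link itself. */
#include <stdint.h>
#include <stdio.h>

static void decode_scontrol(uint32_t scontrol)
{
	printf("SControl 0x%03x: DET=%u SPD=%u IPM=%u\n",
	       (unsigned)scontrol,
	       (unsigned)(scontrol & 0xf),		/* detection/initialization request */
	       (unsigned)((scontrol >> 4) & 0xf),	/* allowed interface speed */
	       (unsigned)((scontrol >> 8) & 0xf));	/* allowed power-management states */
}

int main(void)
{
	uint32_t scontrol = 0x020;	/* pretend the speed was already limited */

	decode_scontrol((scontrol & 0x0f0) | 0x302);	/* old value */
	decode_scontrol((scontrol & 0x0f0) | 0x304);	/* new value */
	return 0;
}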
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7e6e031cc41b..5884cd26d53a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session,
189{ 189{
190 struct scsi_cmnd *sc = ctask->sc; 190 struct scsi_cmnd *sc = ctask->sc;
191 191
192 ctask->state = ISCSI_TASK_COMPLETED;
192 ctask->sc = NULL; 193 ctask->sc = NULL;
193 list_del_init(&ctask->running); 194 list_del_init(&ctask->running);
194 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 195 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
@@ -275,6 +276,25 @@ out:
275 return rc; 276 return rc;
276} 277}
277 278
279static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
280{
281 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
282
283 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
284 conn->tmfrsp_pdus_cnt++;
285
286 if (conn->tmabort_state != TMABORT_INITIAL)
287 return;
288
289 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
290 conn->tmabort_state = TMABORT_SUCCESS;
291 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
292 conn->tmabort_state = TMABORT_NOT_FOUND;
293 else
294 conn->tmabort_state = TMABORT_FAILED;
295 wake_up(&conn->ehwait);
296}
297
278/** 298/**
279 * __iscsi_complete_pdu - complete pdu 299 * __iscsi_complete_pdu - complete pdu
280 * @conn: iscsi conn 300 * @conn: iscsi conn
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
340 360
341 switch(opcode) { 361 switch(opcode) {
342 case ISCSI_OP_LOGOUT_RSP: 362 case ISCSI_OP_LOGOUT_RSP:
363 if (datalen) {
364 rc = ISCSI_ERR_PROTO;
365 break;
366 }
343 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 367 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
344 /* fall through */ 368 /* fall through */
345 case ISCSI_OP_LOGIN_RSP: 369 case ISCSI_OP_LOGIN_RSP:
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
348 * login related PDU's exp_statsn is handled in 372 * login related PDU's exp_statsn is handled in
349 * userspace 373 * userspace
350 */ 374 */
351 rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); 375 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
376 rc = ISCSI_ERR_CONN_FAILED;
352 list_del(&mtask->running); 377 list_del(&mtask->running);
353 if (conn->login_mtask != mtask) 378 if (conn->login_mtask != mtask)
354 __kfifo_put(session->mgmtpool.queue, 379 __kfifo_put(session->mgmtpool.queue,
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
360 break; 385 break;
361 } 386 }
362 387
363 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 388 iscsi_tmf_rsp(conn, hdr);
364 conn->tmfrsp_pdus_cnt++;
365 if (conn->tmabort_state == TMABORT_INITIAL) {
366 conn->tmabort_state =
367 ((struct iscsi_tm_rsp *)hdr)->
368 response == ISCSI_TMF_RSP_COMPLETE ?
369 TMABORT_SUCCESS:TMABORT_FAILED;
370 /* unblock eh_abort() */
371 wake_up(&conn->ehwait);
372 }
373 break; 389 break;
374 case ISCSI_OP_NOOP_IN: 390 case ISCSI_OP_NOOP_IN:
375 if (hdr->ttt != ISCSI_RESERVED_TAG) { 391 if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) {
376 rc = ISCSI_ERR_PROTO; 392 rc = ISCSI_ERR_PROTO;
377 break; 393 break;
378 } 394 }
379 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 395 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
380 396
381 rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); 397 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
398 rc = ISCSI_ERR_CONN_FAILED;
382 list_del(&mtask->running); 399 list_del(&mtask->running);
383 if (conn->login_mtask != mtask) 400 if (conn->login_mtask != mtask)
384 __kfifo_put(session->mgmtpool.queue, 401 __kfifo_put(session->mgmtpool.queue,
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
391 } else if (itt == ISCSI_RESERVED_TAG) { 408 } else if (itt == ISCSI_RESERVED_TAG) {
392 switch(opcode) { 409 switch(opcode) {
393 case ISCSI_OP_NOOP_IN: 410 case ISCSI_OP_NOOP_IN:
394 if (!datalen) { 411 if (datalen) {
395 rc = iscsi_check_assign_cmdsn(session,
396 (struct iscsi_nopin*)hdr);
397 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
398 rc = iscsi_recv_pdu(conn->cls_conn,
399 hdr, NULL, 0);
400 } else
401 rc = ISCSI_ERR_PROTO; 412 rc = ISCSI_ERR_PROTO;
413 break;
414 }
415
416 rc = iscsi_check_assign_cmdsn(session,
417 (struct iscsi_nopin*)hdr);
418 if (rc)
419 break;
420
421 if (hdr->ttt == ISCSI_RESERVED_TAG)
422 break;
423
424 if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
425 rc = ISCSI_ERR_CONN_FAILED;
402 break; 426 break;
403 case ISCSI_OP_REJECT: 427 case ISCSI_OP_REJECT:
404 /* we need sth like iscsi_reject_rsp()*/ 428 /* we need sth like iscsi_reject_rsp()*/
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
568 } 592 }
569 593
570 /* process command queue */ 594 /* process command queue */
571 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, 595 spin_lock_bh(&conn->session->lock);
572 sizeof(void*))) { 596 while (!list_empty(&conn->xmitqueue)) {
573 /* 597 /*
574 * iscsi tcp may readd the task to the xmitqueue to send 598 * iscsi tcp may readd the task to the xmitqueue to send
575 * write data 599 * write data
576 */ 600 */
577 spin_lock_bh(&conn->session->lock); 601 conn->ctask = list_entry(conn->xmitqueue.next,
578 if (list_empty(&conn->ctask->running)) 602 struct iscsi_cmd_task, running);
579 list_add_tail(&conn->ctask->running, &conn->run_list); 603 conn->ctask->state = ISCSI_TASK_RUNNING;
604 list_move_tail(conn->xmitqueue.next, &conn->run_list);
580 spin_unlock_bh(&conn->session->lock); 605 spin_unlock_bh(&conn->session->lock);
606
581 rc = tt->xmit_cmd_task(conn, conn->ctask); 607 rc = tt->xmit_cmd_task(conn, conn->ctask);
582 if (rc) 608 if (rc)
583 goto again; 609 goto again;
610 spin_lock_bh(&conn->session->lock);
584 } 611 }
612 spin_unlock_bh(&conn->session->lock);
585 /* done with this ctask */ 613 /* done with this ctask */
586 conn->ctask = NULL; 614 conn->ctask = NULL;
587 615
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
691 sc->SCp.phase = session->age; 719 sc->SCp.phase = session->age;
692 sc->SCp.ptr = (char *)ctask; 720 sc->SCp.ptr = (char *)ctask;
693 721
722 ctask->state = ISCSI_TASK_PENDING;
694 ctask->mtask = NULL; 723 ctask->mtask = NULL;
695 ctask->conn = conn; 724 ctask->conn = conn;
696 ctask->sc = sc; 725 ctask->sc = sc;
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
700 729
701 session->tt->init_cmd_task(ctask); 730 session->tt->init_cmd_task(ctask);
702 731
703 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); 732 list_add_tail(&ctask->running, &conn->xmitqueue);
704 debug_scsi( 733 debug_scsi(
705 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", 734 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
706 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", 735 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
977/* 1006/*
978 * xmit mutex and session lock must be held 1007 * xmit mutex and session lock must be held
979 */ 1008 */
980#define iscsi_remove_task(tasktype) \ 1009static struct iscsi_mgmt_task *
981static struct iscsi_##tasktype * \ 1010iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
982iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ 1011{
983{ \ 1012 int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
984 int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ 1013 struct iscsi_mgmt_task *task;
985 struct iscsi_##tasktype *task; \
986 \
987 debug_scsi("searching %d tasks\n", nr_tasks); \
988 \
989 for (i = 0; i < nr_tasks; i++) { \
990 __kfifo_get(fifo, (void*)&task, sizeof(void*)); \
991 debug_scsi("check task %u\n", task->itt); \
992 \
993 if (task->itt == itt) { \
994 debug_scsi("matched task\n"); \
995 return task; \
996 } \
997 \
998 __kfifo_put(fifo, (void*)&task, sizeof(void*)); \
999 } \
1000 return NULL; \
1001}
1002 1014
1003iscsi_remove_task(mgmt_task); 1015 debug_scsi("searching %d tasks\n", nr_tasks);
1004iscsi_remove_task(cmd_task); 1016
1017 for (i = 0; i < nr_tasks; i++) {
1018 __kfifo_get(fifo, (void*)&task, sizeof(void*));
1019 debug_scsi("check task %u\n", task->itt);
1020
1021 if (task->itt == itt) {
1022 debug_scsi("matched task\n");
1023 return task;
1024 }
1025
1026 __kfifo_put(fifo, (void*)&task, sizeof(void*));
1027 }
1028 return NULL;
1029}
1005 1030
1006static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) 1031static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
1007{ 1032{
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1027{ 1052{
1028 struct scsi_cmnd *sc; 1053 struct scsi_cmnd *sc;
1029 1054
1030 conn->session->tt->cleanup_cmd_task(conn, ctask);
1031 iscsi_ctask_mtask_cleanup(ctask);
1032
1033 sc = ctask->sc; 1055 sc = ctask->sc;
1034 if (!sc) 1056 if (!sc)
1035 return; 1057 return;
1058
1059 conn->session->tt->cleanup_cmd_task(conn, ctask);
1060 iscsi_ctask_mtask_cleanup(ctask);
1061
1036 sc->result = err; 1062 sc->result = err;
1037 sc->resid = sc->request_bufflen; 1063 sc->resid = sc->request_bufflen;
1038 iscsi_complete_command(conn->session, ctask); 1064 iscsi_complete_command(conn->session, ctask);
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1043 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; 1069 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1044 struct iscsi_conn *conn = ctask->conn; 1070 struct iscsi_conn *conn = ctask->conn;
1045 struct iscsi_session *session = conn->session; 1071 struct iscsi_session *session = conn->session;
1046 struct iscsi_cmd_task *pending_ctask;
1047 int rc; 1072 int rc;
1048 1073
1049 conn->eh_abort_cnt++; 1074 conn->eh_abort_cnt++;
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1061 goto failed; 1086 goto failed;
1062 1087
1063 /* ctask completed before time out */ 1088 /* ctask completed before time out */
1064 if (!ctask->sc) 1089 if (!ctask->sc) {
1065 goto success; 1090 spin_unlock_bh(&session->lock);
1091 debug_scsi("sc completed while abort in progress\n");
1092 goto success_rel_mutex;
1093 }
1066 1094
1067 /* what should we do here ? */ 1095 /* what should we do here ? */
1068 if (conn->ctask == ctask) { 1096 if (conn->ctask == ctask) {
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1071 goto failed; 1099 goto failed;
1072 } 1100 }
1073 1101
1074 /* check for the easy pending cmd abort */ 1102 if (ctask->state == ISCSI_TASK_PENDING)
1075 pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); 1103 goto success_cleanup;
1076 if (pending_ctask) {
1077 /* iscsi_tcp queues write transfers on the xmitqueue */
1078 if (list_empty(&pending_ctask->running)) {
1079 debug_scsi("found pending task\n");
1080 goto success;
1081 } else
1082 __kfifo_put(conn->xmitqueue, (void*)&pending_ctask,
1083 sizeof(void*));
1084 }
1085 1104
1086 conn->tmabort_state = TMABORT_INITIAL; 1105 conn->tmabort_state = TMABORT_INITIAL;
1087 1106
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1089 rc = iscsi_exec_abort_task(sc, ctask); 1108 rc = iscsi_exec_abort_task(sc, ctask);
1090 spin_lock_bh(&session->lock); 1109 spin_lock_bh(&session->lock);
1091 1110
1092 iscsi_ctask_mtask_cleanup(ctask);
1093 if (rc || sc->SCp.phase != session->age || 1111 if (rc || sc->SCp.phase != session->age ||
1094 session->state != ISCSI_STATE_LOGGED_IN) 1112 session->state != ISCSI_STATE_LOGGED_IN)
1095 goto failed; 1113 goto failed;
1114 iscsi_ctask_mtask_cleanup(ctask);
1096 1115
1097 /* ctask completed before tmf abort response */ 1116 switch (conn->tmabort_state) {
1098 if (!ctask->sc) { 1117 case TMABORT_SUCCESS:
1099 debug_scsi("sc completed while abort in progress\n"); 1118 goto success_cleanup;
1100 goto success; 1119 case TMABORT_NOT_FOUND:
1101 } 1120 if (!ctask->sc) {
1102 1121 /* ctask completed before tmf abort response */
1103 if (conn->tmabort_state != TMABORT_SUCCESS) { 1122 spin_unlock_bh(&session->lock);
1123 debug_scsi("sc completed while abort in progress\n");
1124 goto success_rel_mutex;
1125 }
1126 /* fall through */
1127 default:
1128 /* timedout or failed */
1104 spin_unlock_bh(&session->lock); 1129 spin_unlock_bh(&session->lock);
1105 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1130 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1106 spin_lock_bh(&session->lock); 1131 spin_lock_bh(&session->lock);
1107 goto failed; 1132 goto failed;
1108 } 1133 }
1109 1134
1110success: 1135success_cleanup:
1111 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); 1136 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1112 spin_unlock_bh(&session->lock); 1137 spin_unlock_bh(&session->lock);
1113 1138
@@ -1121,6 +1146,7 @@ success:
1121 spin_unlock(&session->lock); 1146 spin_unlock(&session->lock);
1122 write_unlock_bh(conn->recv_lock); 1147 write_unlock_bh(conn->recv_lock);
1123 1148
1149success_rel_mutex:
1124 mutex_unlock(&conn->xmitmutex); 1150 mutex_unlock(&conn->xmitmutex);
1125 return SUCCESS; 1151 return SUCCESS;
1126 1152
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1263 if (cmd_task_size) 1289 if (cmd_task_size)
1264 ctask->dd_data = &ctask[1]; 1290 ctask->dd_data = &ctask[1];
1265 ctask->itt = cmd_i; 1291 ctask->itt = cmd_i;
1292 INIT_LIST_HEAD(&ctask->running);
1266 } 1293 }
1267 1294
1268 spin_lock_init(&session->lock); 1295 spin_lock_init(&session->lock);
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1282 if (mgmt_task_size) 1309 if (mgmt_task_size)
1283 mtask->dd_data = &mtask[1]; 1310 mtask->dd_data = &mtask[1];
1284 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; 1311 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
1312 INIT_LIST_HEAD(&mtask->running);
1285 } 1313 }
1286 1314
1287 if (scsi_add_host(shost, NULL)) 1315 if (scsi_add_host(shost, NULL))
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1322{ 1350{
1323 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 1351 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1324 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 1352 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1353 struct module *owner = cls_session->transport->owner;
1325 1354
1326 scsi_remove_host(shost); 1355 scsi_remove_host(shost);
1327 1356
1328 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 1357 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
1329 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 1358 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
1330 1359
1360 kfree(session->targetname);
1361
1331 iscsi_destroy_session(cls_session); 1362 iscsi_destroy_session(cls_session);
1332 scsi_host_put(shost); 1363 scsi_host_put(shost);
1333 module_put(cls_session->transport->owner); 1364 module_put(owner);
1334} 1365}
1335EXPORT_SYMBOL_GPL(iscsi_session_teardown); 1366EXPORT_SYMBOL_GPL(iscsi_session_teardown);
1336 1367
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1361 conn->tmabort_state = TMABORT_INITIAL; 1392 conn->tmabort_state = TMABORT_INITIAL;
1362 INIT_LIST_HEAD(&conn->run_list); 1393 INIT_LIST_HEAD(&conn->run_list);
1363 INIT_LIST_HEAD(&conn->mgmt_run_list); 1394 INIT_LIST_HEAD(&conn->mgmt_run_list);
1364 1395 INIT_LIST_HEAD(&conn->xmitqueue);
1365 /* initialize general xmit PDU commands queue */
1366 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
1367 GFP_KERNEL, NULL);
1368 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
1369 goto xmitqueue_alloc_fail;
1370 1396
1371 /* initialize general immediate & non-immediate PDU commands queue */ 1397 /* initialize general immediate & non-immediate PDU commands queue */
1372 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), 1398 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1394 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); 1420 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
1395 if (!data) 1421 if (!data)
1396 goto login_mtask_data_alloc_fail; 1422 goto login_mtask_data_alloc_fail;
1397 conn->login_mtask->data = data; 1423 conn->login_mtask->data = conn->data = data;
1398 1424
1399 init_timer(&conn->tmabort_timer); 1425 init_timer(&conn->tmabort_timer);
1400 mutex_init(&conn->xmitmutex); 1426 mutex_init(&conn->xmitmutex);
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail:
1410mgmtqueue_alloc_fail: 1436mgmtqueue_alloc_fail:
1411 kfifo_free(conn->immqueue); 1437 kfifo_free(conn->immqueue);
1412immqueue_alloc_fail: 1438immqueue_alloc_fail:
1413 kfifo_free(conn->xmitqueue);
1414xmitqueue_alloc_fail:
1415 iscsi_destroy_conn(cls_conn); 1439 iscsi_destroy_conn(cls_conn);
1416 return NULL; 1440 return NULL;
1417} 1441}
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1432 1456
1433 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1457 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1434 mutex_lock(&conn->xmitmutex); 1458 mutex_lock(&conn->xmitmutex);
1435 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) {
1436 if (session->tt->suspend_conn_recv)
1437 session->tt->suspend_conn_recv(conn);
1438
1439 session->tt->terminate_conn(conn);
1440 }
1441 1459
1442 spin_lock_bh(&session->lock); 1460 spin_lock_bh(&session->lock);
1443 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 1461 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1474 } 1492 }
1475 1493
1476 spin_lock_bh(&session->lock); 1494 spin_lock_bh(&session->lock);
1477 kfree(conn->login_mtask->data); 1495 kfree(conn->data);
1496 kfree(conn->persistent_address);
1478 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, 1497 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
1479 sizeof(void*)); 1498 sizeof(void*));
1480 list_del(&conn->item); 1499 list_del(&conn->item);
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1489 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; 1508 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
1490 spin_unlock_bh(&session->lock); 1509 spin_unlock_bh(&session->lock);
1491 1510
1492 kfifo_free(conn->xmitqueue);
1493 kfifo_free(conn->immqueue); 1511 kfifo_free(conn->immqueue);
1494 kfifo_free(conn->mgmtqueue); 1512 kfifo_free(conn->mgmtqueue);
1495 1513
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn)
1572 struct iscsi_cmd_task *ctask, *tmp; 1590 struct iscsi_cmd_task *ctask, *tmp;
1573 1591
1574 /* flush pending */ 1592 /* flush pending */
1575 while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { 1593 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1576 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, 1594 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
1577 ctask->itt); 1595 ctask->itt);
1578 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1596 fail_command(conn, ctask, DID_BUS_BUSY << 16);
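
This flush works because the xmit queue is now a `list_head` threaded through each task's `running` member (see the `INIT_LIST_HEAD()` hunks earlier) rather than a kfifo of pointers. The producer side is not part of these hunks; the pairing sketched below is an assumption about the call site, with only the field names taken from the diff:

    /* queueing side (assumed call site): link a task onto the xmit list */
    list_add_tail(&ctask->running, &conn->xmitqueue);

    /* flush side: the _safe walk tolerates fail_command() unlinking
     * ctask->running as part of completing the command */
    list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running)
            fail_command(conn, ctask, DID_BUS_BUSY << 16);
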
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1615 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1633 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1616 spin_unlock_bh(&session->lock); 1634 spin_unlock_bh(&session->lock);
1617 1635
1618 if (session->tt->suspend_conn_recv) 1636 write_lock_bh(conn->recv_lock);
1619 session->tt->suspend_conn_recv(conn); 1637 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1638 write_unlock_bh(conn->recv_lock);
1620 1639
1621 mutex_lock(&conn->xmitmutex); 1640 mutex_lock(&conn->xmitmutex);
1622 /* 1641 /*
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1635 } 1654 }
1636 } 1655 }
1637 1656
1638 session->tt->terminate_conn(conn);
1639 /* 1657 /*
1640 * flush queues. 1658 * flush queues.
1641 */ 1659 */
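
The last two hunks replace the transport-specific `suspend_conn_recv()`/`terminate_conn()` callbacks with a generic receive-side suspend done in libiscsi itself: the rx-suspend bit is set under the connection's `recv_lock`, so any receive running in softirq context has drained before recovery proceeds. Minimal sketch of the pattern, names as in the diff:

    /* stop the transmit side */
    set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);

    /* fence the receive side: recv_lock serializes against the rx softirq */
    write_lock_bh(conn->recv_lock);
    set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
    write_unlock_bh(conn->recv_lock);
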
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c68cdd8736f..d384c16f4a87 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
223 pmboxq->mb.mbxOwner = OWN_HOST; 223 pmboxq->mb.mbxOwner = OWN_HOST;
224 224
225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
226 226
227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
@@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
884 phba->sysfs_mbox.mbox == NULL ) { 884 phba->sysfs_mbox.mbox == NULL ) {
885 sysfs_mbox_idle(phba); 885 sysfs_mbox_idle(phba);
886 spin_unlock_irq(host->host_lock); 886 spin_unlock_irq(host->host_lock);
887 return -EINVAL; 887 return -EAGAIN;
888 } 888 }
889 } 889 }
890 890
@@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1000 spin_unlock_irq(phba->host->host_lock); 1000 spin_unlock_irq(phba->host->host_lock);
1001 rc = lpfc_sli_issue_mbox_wait (phba, 1001 rc = lpfc_sli_issue_mbox_wait (phba,
1002 phba->sysfs_mbox.mbox, 1002 phba->sysfs_mbox.mbox,
1003 phba->fc_ratov * 2); 1003 lpfc_mbox_tmo_val(phba,
1004 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
1004 spin_lock_irq(phba->host->host_lock); 1005 spin_lock_irq(phba->host->host_lock);
1005 } 1006 }
1006 1007
1007 if (rc != MBX_SUCCESS) { 1008 if (rc != MBX_SUCCESS) {
1008 sysfs_mbox_idle(phba); 1009 sysfs_mbox_idle(phba);
1009 spin_unlock_irq(host->host_lock); 1010 spin_unlock_irq(host->host_lock);
1010 return -ENODEV; 1011 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
1011 } 1012 }
1012 phba->sysfs_mbox.state = SMBOX_READING; 1013 phba->sysfs_mbox.state = SMBOX_READING;
1013 } 1014 }
@@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1016 printk(KERN_WARNING "mbox_read: Bad State\n"); 1017 printk(KERN_WARNING "mbox_read: Bad State\n");
1017 sysfs_mbox_idle(phba); 1018 sysfs_mbox_idle(phba);
1018 spin_unlock_irq(host->host_lock); 1019 spin_unlock_irq(host->host_lock);
1019 return -EINVAL; 1020 return -EAGAIN;
1020 } 1021 }
1021 1022
1022 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); 1023 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
@@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
1210 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1211 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1211 struct lpfc_sli *psli = &phba->sli; 1212 struct lpfc_sli *psli = &phba->sli;
1212 struct fc_host_statistics *hs = &phba->link_stats; 1213 struct fc_host_statistics *hs = &phba->link_stats;
1214 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
1213 LPFC_MBOXQ_t *pmboxq; 1215 LPFC_MBOXQ_t *pmboxq;
1214 MAILBOX_t *pmb; 1216 MAILBOX_t *pmb;
1217 unsigned long seconds;
1215 int rc = 0; 1218 int rc = 0;
1216 1219
1217 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1220 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost)
1272 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 1275 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
1273 hs->error_frames = pmb->un.varRdLnk.crcCnt; 1276 hs->error_frames = pmb->un.varRdLnk.crcCnt;
1274 1277
1278 hs->link_failure_count -= lso->link_failure_count;
1279 hs->loss_of_sync_count -= lso->loss_of_sync_count;
1280 hs->loss_of_signal_count -= lso->loss_of_signal_count;
1281 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
1282 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
1283 hs->invalid_crc_count -= lso->invalid_crc_count;
1284 hs->error_frames -= lso->error_frames;
1285
1275 if (phba->fc_topology == TOPOLOGY_LOOP) { 1286 if (phba->fc_topology == TOPOLOGY_LOOP) {
1276 hs->lip_count = (phba->fc_eventTag >> 1); 1287 hs->lip_count = (phba->fc_eventTag >> 1);
1288 hs->lip_count -= lso->link_events;
1277 hs->nos_count = -1; 1289 hs->nos_count = -1;
1278 } else { 1290 } else {
1279 hs->lip_count = -1; 1291 hs->lip_count = -1;
1280 hs->nos_count = (phba->fc_eventTag >> 1); 1292 hs->nos_count = (phba->fc_eventTag >> 1);
1293 hs->nos_count -= lso->link_events;
1281 } 1294 }
1282 1295
1283 hs->dumped_frames = -1; 1296 hs->dumped_frames = -1;
1284 1297
1285/* FIX ME */ 1298 seconds = get_seconds();
1286 /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ 1299 if (seconds < psli->stats_start)
1300 hs->seconds_since_last_reset = seconds +
1301 ((unsigned long)-1 - psli->stats_start);
1302 else
1303 hs->seconds_since_last_reset = seconds - psli->stats_start;
1287 1304
1288 return hs; 1305 return hs;
1289} 1306}
1290 1307
1308static void
1309lpfc_reset_stats(struct Scsi_Host *shost)
1310{
1311 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1312 struct lpfc_sli *psli = &phba->sli;
1313 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
1314 LPFC_MBOXQ_t *pmboxq;
1315 MAILBOX_t *pmb;
1316 int rc = 0;
1317
1318 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1319 if (!pmboxq)
1320 return;
1321 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1322
1323 pmb = &pmboxq->mb;
1324 pmb->mbxCommand = MBX_READ_STATUS;
1325 pmb->mbxOwner = OWN_HOST;
1326 pmb->un.varWords[0] = 0x1; /* reset request */
1327 pmboxq->context1 = NULL;
1328
1329 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1330 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1331 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1332 else
1333 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1334
1335 if (rc != MBX_SUCCESS) {
1336 if (rc == MBX_TIMEOUT)
1337 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1338 else
1339 mempool_free(pmboxq, phba->mbox_mem_pool);
1340 return;
1341 }
1342
1343 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1344 pmb->mbxCommand = MBX_READ_LNK_STAT;
1345 pmb->mbxOwner = OWN_HOST;
1346 pmboxq->context1 = NULL;
1347
1348 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1349 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1350 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1351 else
1352 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1353
1354 if (rc != MBX_SUCCESS) {
1355 if (rc == MBX_TIMEOUT)
1356 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1357 else
1358 mempool_free( pmboxq, phba->mbox_mem_pool);
1359 return;
1360 }
1361
1362 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
1363 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
1364 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
1365 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
1366 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
1367 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
1368 lso->error_frames = pmb->un.varRdLnk.crcCnt;
1369 lso->link_events = (phba->fc_eventTag >> 1);
1370
1371 psli->stats_start = get_seconds();
1372
1373 return;
1374}
1291 1375
1292/* 1376/*
1293 * The LPFC driver treats linkdown handling as target loss events so there 1377 * The LPFC driver treats linkdown handling as target loss events so there
@@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = {
1431 */ 1515 */
1432 1516
1433 .get_fc_host_stats = lpfc_get_stats, 1517 .get_fc_host_stats = lpfc_get_stats,
1434 1518 .reset_fc_host_stats = lpfc_reset_stats,
1435 /* the LPFC driver doesn't support resetting stats yet */
1436 1519
1437 .dd_fcrport_size = sizeof(struct lpfc_rport_data), 1520 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
1438 .show_rport_maxframe_size = 1, 1521 .show_rport_maxframe_size = 1,
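
Two details in the lpfc_attr.c changes deserve a note. The HBA's link counters cannot be cleared, so `lpfc_reset_stats()` snapshots them into `lnk_stat_offsets` and `lpfc_get_stats()` reports the delta; and `seconds_since_last_reset` is computed so that a wrap of `get_seconds()` does not yield a bogus interval. Compact sketch, with `hw_link_failures` standing in for the value read via the READ_LNK_STAT mailbox (`pmb->un.varRdLnk.linkFailureCnt` in the diff):

    /* counters are reported relative to the snapshot taken at reset time */
    hs->link_failure_count = hw_link_failures - lso->link_failure_count;

    /* wrap-safe elapsed seconds since the snapshot */
    seconds = get_seconds();
    if (seconds < psli->stats_start)     /* counter wrapped past ULONG_MAX */
            hs->seconds_since_last_reset =
                    seconds + ((unsigned long)-1 - psli->stats_start);
    else
            hs->seconds_since_last_reset = seconds - psli->stats_start;
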
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 517e9e4dd461..2a176467f71b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
127void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 127void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
128void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 128void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
129LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 129LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
130int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
130 131
131int lpfc_mem_alloc(struct lpfc_hba *); 132int lpfc_mem_alloc(struct lpfc_hba *);
132void lpfc_mem_free(struct lpfc_hba *); 133void lpfc_mem_free(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57af53e..bbb7310210b0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
131 } 131 }
132 132
133ct_unsol_event_exit_piocbq: 133ct_unsol_event_exit_piocbq:
134 list_del(&head);
134 if (pmbuf) { 135 if (pmbuf) {
135 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { 136 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
136 lpfc_mbuf_free(phba, matp->virt, matp->phys); 137 lpfc_mbuf_free(phba, matp->virt, matp->phys);
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
481 if (CTrsp->CommandResponse.bits.CmdRsp == 482 if (CTrsp->CommandResponse.bits.CmdRsp ==
482 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { 483 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
483 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 484 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
484 "%d:0239 NameServer Rsp " 485 "%d:0208 NameServer Rsp "
485 "Data: x%x\n", 486 "Data: x%x\n",
486 phba->brd_no, 487 phba->brd_no,
487 phba->fc_flag); 488 phba->fc_flag);
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
588 589
589 lpfc_decode_firmware_rev(phba, fwrev, 0); 590 lpfc_decode_firmware_rev(phba, fwrev, 0);
590 591
591 if (phba->Port[0]) { 592 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
592 sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, 593 fwrev, lpfc_release_version);
593 phba->Port, fwrev, lpfc_release_version); 594 return;
594 } else {
595 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
596 fwrev, lpfc_release_version);
597 }
598} 595}
599 596
600/* 597/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b89f6cb641e6..3567de613162 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1848,9 +1848,12 @@ static void
1848lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1848lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1849 struct lpfc_iocbq * rspiocb) 1849 struct lpfc_iocbq * rspiocb)
1850{ 1850{
1851 IOCB_t *irsp;
1851 struct lpfc_nodelist *ndlp; 1852 struct lpfc_nodelist *ndlp;
1852 LPFC_MBOXQ_t *mbox = NULL; 1853 LPFC_MBOXQ_t *mbox = NULL;
1853 1854
1855 irsp = &rspiocb->iocb;
1856
1854 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1857 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1855 if (cmdiocb->context_un.mbox) 1858 if (cmdiocb->context_un.mbox)
1856 mbox = cmdiocb->context_un.mbox; 1859 mbox = cmdiocb->context_un.mbox;
@@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1893 mempool_free( mbox, phba->mbox_mem_pool); 1896 mempool_free( mbox, phba->mbox_mem_pool);
1894 } else { 1897 } else {
1895 mempool_free( mbox, phba->mbox_mem_pool); 1898 mempool_free( mbox, phba->mbox_mem_pool);
1896 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 1899 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
1897 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1900 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1898 ndlp = NULL; 1901 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1902 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1903 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
1904 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1905 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1906 ndlp = NULL;
1907 }
1899 } 1908 }
1900 } 1909 }
1901 } 1910 }
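
The lpfc_cmpl_els_acc() change keeps the node on its list when the ACC failed only because the driver itself tore it down (lpfc_els_abort, link down, SLI down); only genuine failures drop it to NLP_NO_LIST. The test reads more clearly factored into a predicate; the helper name below is illustrative, the status codes are from the diff:

    /* true when the local reject was driver-initiated (abort/link/SLI down) */
    static int els_acc_was_aborted(IOCB_t *irsp)
    {
            return irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
                   (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
                    irsp->un.ulpWord[4] == IOERR_LINK_DOWN ||
                    irsp->un.ulpWord[4] == IOERR_SLI_DOWN);
    }

    /* in lpfc_cmpl_els_acc(): */
    if (!els_acc_was_aborted(irsp) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
            lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
            ndlp = NULL;
    }
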
@@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2839 2848
2840 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 2849 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2841 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2850 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2842 "%d:0128 Xmit ELS RPS ACC response tag x%x " 2851 "%d:0118 Xmit ELS RPS ACC response tag x%x "
2843 "Data: x%x x%x x%x x%x x%x\n", 2852 "Data: x%x x%x x%x x%x x%x\n",
2844 phba->brd_no, 2853 phba->brd_no,
2845 elsiocb->iocb.ulpIoTag, 2854 elsiocb->iocb.ulpIoTag,
@@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2948 2957
2949 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 2958 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2950 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2959 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2951 "%d:0128 Xmit ELS RPL ACC response tag x%x " 2960 "%d:0120 Xmit ELS RPL ACC response tag x%x "
2952 "Data: x%x x%x x%x x%x x%x\n", 2961 "Data: x%x x%x x%x x%x x%x\n",
2953 phba->brd_no, 2962 phba->brd_no,
2954 elsiocb->iocb.ulpIoTag, 2963 elsiocb->iocb.ulpIoTag,
@@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3109 struct lpfc_nodelist *ndlp, *next_ndlp; 3118 struct lpfc_nodelist *ndlp, *next_ndlp;
3110 3119
3111 /* FAN received */ 3120 /* FAN received */
3112 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", 3121 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
3113 phba->brd_no); 3122 phba->brd_no);
3114 3123
3115 icmd = &cmdiocb->iocb; 3124 icmd = &cmdiocb->iocb;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4d6cf990c4fc..b2f1552f1848 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1557 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1557 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1558 } 1558 }
1559 } 1559 }
1560
1561 spin_lock_irq(phba->host->host_lock);
1560 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1562 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1561 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1563 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1562 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1564 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1569 mempool_free(mb, phba->mbox_mem_pool); 1571 mempool_free(mb, phba->mbox_mem_pool);
1570 } 1572 }
1571 } 1573 }
1574 spin_unlock_irq(phba->host->host_lock);
1572 1575
1573 lpfc_els_abort(phba,ndlp,0); 1576 lpfc_els_abort(phba,ndlp,0);
1574 spin_lock_irq(phba->host->host_lock); 1577 spin_lock_irq(phba->host->host_lock);
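
The lpfc_freenode() hunks bracket the walk of the pending mailbox queue with host_lock, so entries referencing the node being freed cannot race with the mailbox handler. The loop body between the two hunks is elided in this view; the list_del shown below is an assumption about what it does, the rest is from the diff:

    spin_lock_irq(phba->host->host_lock);
    list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
            if (mb->mb.mbxCommand == MBX_REG_LOGIN64 &&
                ndlp == (struct lpfc_nodelist *) mb->context2) {
                    list_del(&mb->list);            /* assumed: unlink from mboxq */
                    mempool_free(mb, phba->mbox_mem_pool);
            }
    }
    spin_unlock_irq(phba->host->host_lock);
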
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1782 /* LOG change to REGLOGIN */ 1785 /* LOG change to REGLOGIN */
1783 /* FIND node DID reglogin */ 1786 /* FIND node DID reglogin */
1784 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1787 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1785 "%d:0931 FIND node DID reglogin" 1788 "%d:0901 FIND node DID reglogin"
1786 " Data: x%p x%x x%x x%x\n", 1789 " Data: x%p x%x x%x x%x\n",
1787 phba->brd_no, 1790 phba->brd_no,
1788 ndlp, ndlp->nlp_DID, 1791 ndlp, ndlp->nlp_DID,
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1805 /* LOG change to PRLI */ 1808 /* LOG change to PRLI */
1806 /* FIND node DID prli */ 1809 /* FIND node DID prli */
1807 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1810 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1808 "%d:0931 FIND node DID prli " 1811 "%d:0902 FIND node DID prli "
1809 "Data: x%p x%x x%x x%x\n", 1812 "Data: x%p x%x x%x x%x\n",
1810 phba->brd_no, 1813 phba->brd_no,
1811 ndlp, ndlp->nlp_DID, 1814 ndlp, ndlp->nlp_DID,
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1828 /* LOG change to NPR */ 1831 /* LOG change to NPR */
1829 /* FIND node DID npr */ 1832 /* FIND node DID npr */
1830 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1833 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1831 "%d:0931 FIND node DID npr " 1834 "%d:0903 FIND node DID npr "
1832 "Data: x%p x%x x%x x%x\n", 1835 "Data: x%p x%x x%x x%x\n",
1833 phba->brd_no, 1836 phba->brd_no,
1834 ndlp, ndlp->nlp_DID, 1837 ndlp, ndlp->nlp_DID,
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1851 /* LOG change to UNUSED */ 1854 /* LOG change to UNUSED */
1852 /* FIND node DID unused */ 1855 /* FIND node DID unused */
1853 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1856 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1854 "%d:0931 FIND node DID unused " 1857 "%d:0905 FIND node DID unused "
1855 "Data: x%p x%x x%x x%x\n", 1858 "Data: x%p x%x x%x x%x\n",
1856 phba->brd_no, 1859 phba->brd_no,
1857 ndlp, ndlp->nlp_DID, 1860 ndlp, ndlp->nlp_DID,
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2335 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2338 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2336 if (!initlinkmbox) { 2339 if (!initlinkmbox) {
2337 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2340 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2338 "%d:0226 Device Discovery " 2341 "%d:0206 Device Discovery "
2339 "completion error\n", 2342 "completion error\n",
2340 phba->brd_no); 2343 phba->brd_no);
2341 phba->hba_state = LPFC_HBA_ERROR; 2344 phba->hba_state = LPFC_HBA_ERROR;
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2365 if (!clearlambox) { 2368 if (!clearlambox) {
2366 clrlaerr = 1; 2369 clrlaerr = 1;
2367 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2370 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2368 "%d:0226 Device Discovery " 2371 "%d:0207 Device Discovery "
2369 "completion error\n", 2372 "completion error\n",
2370 phba->brd_no); 2373 phba->brd_no);
2371 phba->hba_state = LPFC_HBA_ERROR; 2374 phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ef47b824cbed..f6948ffe689a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba)
1379 /* stop all timers associated with this hba */ 1379 /* stop all timers associated with this hba */
1380 lpfc_stop_timer(phba); 1380 lpfc_stop_timer(phba);
1381 phba->work_hba_events = 0; 1381 phba->work_hba_events = 0;
1382 phba->work_ha = 0;
1382 1383
1383 lpfc_printf_log(phba, 1384 lpfc_printf_log(phba,
1384 KERN_WARNING, 1385 KERN_WARNING,
@@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1616 goto out_free_iocbq; 1617 goto out_free_iocbq;
1617 } 1618 }
1618 1619
1619 /* We can rely on a queue depth attribute only after SLI HBA setup */ 1620 /*
1621 * Set initial can_queue value since 0 is no longer supported and
1622 * scsi_add_host will fail. This will be adjusted later based on the
1623 * max xri value determined in hba setup.
1624 */
1620 host->can_queue = phba->cfg_hba_queue_depth - 10; 1625 host->can_queue = phba->cfg_hba_queue_depth - 10;
1621 1626
1622 /* Tell the midlayer we support 16 byte commands */ 1627 /* Tell the midlayer we support 16 byte commands */
@@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1656 goto out_free_irq; 1661 goto out_free_irq;
1657 } 1662 }
1658 1663
1664 /*
1665 * hba setup may have changed the hba_queue_depth so we need to adjust
1666 * the value of can_queue.
1667 */
1668 host->can_queue = phba->cfg_hba_queue_depth - 10;
1669
1659 lpfc_discovery_wait(phba); 1670 lpfc_discovery_wait(phba);
1660 1671
1661 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1672 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
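
The two lpfc_init.c hunks belong together: scsi_add_host() rejects a zero can_queue, but the real depth is only known once lpfc_sli_hba_setup() has determined the max XRI count, so the value is set twice. In outline:

    /* before scsi_add_host(): a provisional, non-zero queue depth */
    host->can_queue = phba->cfg_hba_queue_depth - 10;

    /* scsi_add_host(host, ...);  -- elided, fails if can_queue == 0    */
    /* lpfc_sli_hba_setup();      -- may adjust cfg_hba_queue_depth     */

    /* after HBA setup: apply the final depth */
    host->can_queue = phba->cfg_hba_queue_depth - 10;
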
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e42f22aaf71b..4d016c2a1b26 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba)
651 651
652 return mbq; 652 return mbq;
653} 653}
654
655int
656lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
657{
658 switch (cmd) {
659 case MBX_WRITE_NV: /* 0x03 */
660 case MBX_UPDATE_CFG: /* 0x1B */
661 case MBX_DOWN_LOAD: /* 0x1C */
662 case MBX_DEL_LD_ENTRY: /* 0x1D */
663 case MBX_LOAD_AREA: /* 0x81 */
664 case MBX_FLASH_WR_ULA: /* 0x98 */
665 case MBX_LOAD_EXP_ROM: /* 0x9C */
666 return LPFC_MBOX_TMO_FLASH_CMD;
667 }
668 return LPFC_MBOX_TMO;
669}
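
lpfc_mbox_tmo_val() gives flash-related mailbox commands a 300-second budget (LPFC_MBOX_TMO_FLASH_CMD) instead of the default 30 seconds. Its two consumers only appear much later, in the lpfc_sli.c hunks; pulled together and condensed, the interrupt-driven and polled paths use it like this (the completion test is abbreviated to a placeholder):

    /* interrupt-driven mailbox: arm the per-command timeout */
    mod_timer(&psli->mbox_tmo,
              jiffies + HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand));

    /* polled mailbox: count the same budget down in 1 ms steps */
    i = lpfc_mbox_tmo_val(phba, mb->mbxCommand) * 1000;    /* convert to ms */
    while (!mailbox_completed) {                            /* condition condensed */
            if (i-- <= 0)
                    return MBX_NOT_FINISHED;                /* timed out */
            mdelay(1);
    }
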
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd0b0e293d63..20449a8dd53d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
179 179
180 /* Abort outstanding I/O on NPort <nlp_DID> */ 180 /* Abort outstanding I/O on NPort <nlp_DID> */
181 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 181 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
182 "%d:0201 Abort outstanding I/O on NPort x%x " 182 "%d:0205 Abort outstanding I/O on NPort x%x "
183 "Data: x%x x%x x%x\n", 183 "Data: x%x x%x x%x\n",
184 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 184 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
185 ndlp->nlp_state, ndlp->nlp_rpi); 185 ndlp->nlp_state, ndlp->nlp_rpi);
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
393 mbox->context2 = ndlp; 393 mbox->context2 = ndlp;
394 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 394 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
395 395
396 /*
397 * If there is an outstanding PLOGI issued, abort it before
398 * sending ACC rsp for received PLOGI. If pending plogi
399 * is not canceled here, the plogi will be rejected by
400 * remote port and will be retried. On a configuration with
401 * single discovery thread, this will cause a huge delay in
402 * discovery. Also this will cause multiple state machines
403 * running in parallel for this node.
404 */
405 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
406 /* software abort outstanding PLOGI */
407 lpfc_els_abort(phba, ndlp, 1);
408 }
409
396 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); 410 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
397 return 1; 411 return 1;
398 412
@@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1601 1615
1602 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1616 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1603 1617
1604 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1618 /*
1619 * Do not start discovery if discovery is about to start
1620 * or discovery in progress for this node. Starting discovery
1621 * here will affect the counting of discovery threads.
1622 */
1623 if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
1624 (ndlp->nlp_flag & NLP_NPR_2B_DISC)){
1605 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1625 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1606 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1626 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1607 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1627 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a760a44173df..a8816a8738f8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/delay.h>
24 25
25#include <scsi/scsi.h> 26#include <scsi/scsi.h>
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
@@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
841 return 0; 842 return 0;
842} 843}
843 844
845static void
846lpfc_block_error_handler(struct scsi_cmnd *cmnd)
847{
848 struct Scsi_Host *shost = cmnd->device->host;
849 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
850
851 spin_lock_irq(shost->host_lock);
852 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
853 spin_unlock_irq(shost->host_lock);
854 msleep(1000);
855 spin_lock_irq(shost->host_lock);
856 }
857 spin_unlock_irq(shost->host_lock);
858 return;
859}
844 860
845static int 861static int
846lpfc_abort_handler(struct scsi_cmnd *cmnd) 862lpfc_abort_handler(struct scsi_cmnd *cmnd)
@@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
855 unsigned int loop_count = 0; 871 unsigned int loop_count = 0;
856 int ret = SUCCESS; 872 int ret = SUCCESS;
857 873
874 lpfc_block_error_handler(cmnd);
858 spin_lock_irq(shost->host_lock); 875 spin_lock_irq(shost->host_lock);
859 876
860 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 877 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
957 int ret = FAILED; 974 int ret = FAILED;
958 int cnt, loopcnt; 975 int cnt, loopcnt;
959 976
977 lpfc_block_error_handler(cmnd);
960 spin_lock_irq(shost->host_lock); 978 spin_lock_irq(shost->host_lock);
961 /* 979 /*
962 * If target is not in a MAPPED state, delay the reset until 980 * If target is not in a MAPPED state, delay the reset until
@@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1073 int cnt, loopcnt; 1091 int cnt, loopcnt;
1074 struct lpfc_scsi_buf * lpfc_cmd; 1092 struct lpfc_scsi_buf * lpfc_cmd;
1075 1093
1094 lpfc_block_error_handler(cmnd);
1076 spin_lock_irq(shost->host_lock); 1095 spin_lock_irq(shost->host_lock);
1077 1096
1078 lpfc_cmd = lpfc_get_scsi_buf(phba); 1097 lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1104 ndlp->rport->dd_data); 1123 ndlp->rport->dd_data);
1105 if (ret != SUCCESS) { 1124 if (ret != SUCCESS) {
1106 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1125 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1107 "%d:0713 Bus Reset on target %d failed\n", 1126 "%d:0700 Bus Reset on target %d failed\n",
1108 phba->brd_no, i); 1127 phba->brd_no, i);
1109 err_count++; 1128 err_count++;
1110 } 1129 }
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 350a625fa224..70f4d5a1348e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
320 kfree(old_arr); 320 kfree(old_arr);
321 return iotag; 321 return iotag;
322 } 322 }
323 } 323 } else
324 spin_unlock_irq(phba->host->host_lock);
324 325
325 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 326 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
326 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", 327 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
969 * resources need to be recovered. 970 * resources need to be recovered.
970 */ 971 */
971 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 972 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
972 printk(KERN_INFO "%s: IOCB cmd 0x%x processed." 973 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
973 " Skipping completion\n", __FUNCTION__, 974 "%d:0314 IOCB cmd 0x%x"
974 irsp->ulpCommand); 975 " processed. Skipping"
976 " completion", phba->brd_no,
977 irsp->ulpCommand);
975 break; 978 break;
976 } 979 }
977 980
@@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1104 if (unlikely(irsp->ulpStatus)) { 1107 if (unlikely(irsp->ulpStatus)) {
1105 /* Rsp ring <ringno> error: IOCB */ 1108 /* Rsp ring <ringno> error: IOCB */
1106 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1109 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1107 "%d:0326 Rsp Ring %d error: IOCB Data: " 1110 "%d:0336 Rsp Ring %d error: IOCB Data: "
1108 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1111 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1109 phba->brd_no, pring->ringno, 1112 phba->brd_no, pring->ringno,
1110 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1113 irsp->un.ulpWord[0], irsp->un.ulpWord[1],
@@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1122 * resources need to be recovered. 1125 * resources need to be recovered.
1123 */ 1126 */
1124 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1127 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1125 printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " 1128 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1126 "Skipping completion\n", __FUNCTION__, 1129 "%d:0333 IOCB cmd 0x%x"
1127 irsp->ulpCommand); 1130 " processed. Skipping"
1131 " completion\n", phba->brd_no,
1132 irsp->ulpCommand);
1128 break; 1133 break;
1129 } 1134 }
1130 1135
@@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1155 } else { 1160 } else {
1156 /* Unknown IOCB command */ 1161 /* Unknown IOCB command */
1157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1162 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1158 "%d:0321 Unknown IOCB command " 1163 "%d:0334 Unknown IOCB command "
1159 "Data: x%x, x%x x%x x%x x%x\n", 1164 "Data: x%x, x%x x%x x%x x%x\n",
1160 phba->brd_no, type, irsp->ulpCommand, 1165 phba->brd_no, type, irsp->ulpCommand,
1161 irsp->ulpStatus, irsp->ulpIoTag, 1166 irsp->ulpStatus, irsp->ulpIoTag,
@@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1238 lpfc_printf_log(phba, 1243 lpfc_printf_log(phba,
1239 KERN_ERR, 1244 KERN_ERR,
1240 LOG_SLI, 1245 LOG_SLI,
1241 "%d:0312 Ring %d handler: portRspPut %d " 1246 "%d:0303 Ring %d handler: portRspPut %d "
1242 "is bigger then rsp ring %d\n", 1247 "is bigger then rsp ring %d\n",
1243 phba->brd_no, 1248 phba->brd_no,
1244 pring->ringno, portRspPut, portRspMax); 1249 pring->ringno, portRspPut, portRspMax);
@@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1383 lpfc_printf_log(phba, 1388 lpfc_printf_log(phba,
1384 KERN_ERR, 1389 KERN_ERR,
1385 LOG_SLI, 1390 LOG_SLI,
1386 "%d:0321 Unknown IOCB command " 1391 "%d:0335 Unknown IOCB command "
1387 "Data: x%x x%x x%x x%x\n", 1392 "Data: x%x x%x x%x x%x\n",
1388 phba->brd_no, 1393 phba->brd_no,
1389 irsp->ulpCommand, 1394 irsp->ulpCommand,
@@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1399 next_iocb, 1404 next_iocb,
1400 &saveq->list, 1405 &saveq->list,
1401 list) { 1406 list) {
1407 list_del(&rspiocbp->list);
1402 lpfc_sli_release_iocbq(phba, 1408 lpfc_sli_release_iocbq(phba,
1403 rspiocbp); 1409 rspiocbp);
1404 } 1410 }
1405 } 1411 }
1406
1407 lpfc_sli_release_iocbq(phba, saveq); 1412 lpfc_sli_release_iocbq(phba, saveq);
1408 } 1413 }
1409 } 1414 }
@@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
1711 phba->fc_myDID = 0; 1716 phba->fc_myDID = 0;
1712 phba->fc_prevDID = 0; 1717 phba->fc_prevDID = 0;
1713 1718
1714 psli->sli_flag = 0;
1715
1716 /* Turn off parity checking and serr during the physical reset */ 1719 /* Turn off parity checking and serr during the physical reset */
1717 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 1720 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1718 pci_write_config_word(phba->pcidev, PCI_COMMAND, 1721 pci_write_config_word(phba->pcidev, PCI_COMMAND,
1719 (cfg_value & 1722 (cfg_value &
1720 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 1723 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1721 1724
1722 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1725 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1723 /* Now toggle INITFF bit in the Host Control Register */ 1726 /* Now toggle INITFF bit in the Host Control Register */
1724 writel(HC_INITFF, phba->HCregaddr); 1727 writel(HC_INITFF, phba->HCregaddr);
1725 mdelay(1); 1728 mdelay(1);
@@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1760 1763
1761 /* Restart HBA */ 1764 /* Restart HBA */
1762 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1765 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1763 "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, 1766 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
1764 phba->hba_state, psli->sli_flag); 1767 phba->hba_state, psli->sli_flag);
1765 1768
1766 word0 = 0; 1769 word0 = 0;
@@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1792 1795
1793 spin_unlock_irq(phba->host->host_lock); 1796 spin_unlock_irq(phba->host->host_lock);
1794 1797
1798 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1799 psli->stats_start = get_seconds();
1800
1795 if (skip_post) 1801 if (skip_post)
1796 mdelay(100); 1802 mdelay(100);
1797 else 1803 else
@@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
1902 } 1908 }
1903 1909
1904 while (resetcount < 2 && !done) { 1910 while (resetcount < 2 && !done) {
1911 spin_lock_irq(phba->host->host_lock);
1912 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1913 spin_unlock_irq(phba->host->host_lock);
1905 phba->hba_state = LPFC_STATE_UNKNOWN; 1914 phba->hba_state = LPFC_STATE_UNKNOWN;
1906 lpfc_sli_brdrestart(phba); 1915 lpfc_sli_brdrestart(phba);
1907 msleep(2500); 1916 msleep(2500);
@@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
1909 if (rc) 1918 if (rc)
1910 break; 1919 break;
1911 1920
1921 spin_lock_irq(phba->host->host_lock);
1922 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1923 spin_unlock_irq(phba->host->host_lock);
1912 resetcount++; 1924 resetcount++;
1913 1925
1914 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 1926 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
@@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2194 return (MBX_NOT_FINISHED); 2206 return (MBX_NOT_FINISHED);
2195 } 2207 }
2196 /* timeout active mbox command */ 2208 /* timeout active mbox command */
2197 mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); 2209 mod_timer(&psli->mbox_tmo, (jiffies +
2210 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2198 } 2211 }
2199 2212
2200 /* Mailbox cmd <cmd> issue */ 2213 /* Mailbox cmd <cmd> issue */
@@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2254 break; 2267 break;
2255 2268
2256 case MBX_POLL: 2269 case MBX_POLL:
2257 i = 0;
2258 psli->mbox_active = NULL; 2270 psli->mbox_active = NULL;
2259 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2271 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2260 /* First read mbox status word */ 2272 /* First read mbox status word */
@@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2268 /* Read the HBA Host Attention Register */ 2280 /* Read the HBA Host Attention Register */
2269 ha_copy = readl(phba->HAregaddr); 2281 ha_copy = readl(phba->HAregaddr);
2270 2282
2283 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2284 i *= 1000; /* Convert to ms */
2285
2271 /* Wait for command to complete */ 2286 /* Wait for command to complete */
2272 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2287 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2273 (!(ha_copy & HA_MBATT) && 2288 (!(ha_copy & HA_MBATT) &&
2274 (phba->hba_state > LPFC_WARM_START))) { 2289 (phba->hba_state > LPFC_WARM_START))) {
2275 if (i++ >= 100) { 2290 if (i-- <= 0) {
2276 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2291 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2277 spin_unlock_irqrestore(phba->host->host_lock, 2292 spin_unlock_irqrestore(phba->host->host_lock,
2278 drvr_flag); 2293 drvr_flag);
@@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2290 2305
2291 /* Can be in interrupt context, do not sleep */ 2306 /* Can be in interrupt context, do not sleep */
2292 /* (or might be called with interrupts disabled) */ 2307 /* (or might be called with interrupts disabled) */
2293 mdelay(i); 2308 mdelay(1);
2294 2309
2295 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2310 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2296 2311
@@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3005 3020
3006 if (timeleft == 0) { 3021 if (timeleft == 0) {
3007 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3008 "%d:0329 IOCB wait timeout error - no " 3023 "%d:0338 IOCB wait timeout error - no "
3009 "wake response Data x%x\n", 3024 "wake response Data x%x\n",
3010 phba->brd_no, timeout); 3025 phba->brd_no, timeout);
3011 retval = IOCB_TIMEDOUT; 3026 retval = IOCB_TIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index d8ef0d2894d4..e26de6809358 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -172,6 +172,18 @@ struct lpfc_sli_stat {
172 uint32_t mbox_busy; /* Mailbox cmd busy */ 172 uint32_t mbox_busy; /* Mailbox cmd busy */
173}; 173};
174 174
175/* Structure to store link status values when port stats are reset */
176struct lpfc_lnk_stat {
177 uint32_t link_failure_count;
178 uint32_t loss_of_sync_count;
179 uint32_t loss_of_signal_count;
180 uint32_t prim_seq_protocol_err_count;
181 uint32_t invalid_tx_word_count;
182 uint32_t invalid_crc_count;
183 uint32_t error_frames;
184 uint32_t link_events;
185};
186
175/* Structure used to hold SLI information */ 187/* Structure used to hold SLI information */
176struct lpfc_sli { 188struct lpfc_sli {
177 uint32_t num_rings; 189 uint32_t num_rings;
@@ -201,6 +213,8 @@ struct lpfc_sli {
201 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ 213 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
202 size_t iocbq_lookup_len; /* current lengs of the array */ 214 size_t iocbq_lookup_len; /* current lengs of the array */
203 uint16_t last_iotag; /* last allocated IOTAG */ 215 uint16_t last_iotag; /* last allocated IOTAG */
216 unsigned long stats_start; /* in seconds */
217 struct lpfc_lnk_stat lnk_stat_offsets;
204}; 218};
205 219
206/* Given a pointer to the start of the ring, and the slot number of 220/* Given a pointer to the start of the ring, and the slot number of
@@ -211,3 +225,9 @@ struct lpfc_sli {
211 225
212#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 226#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
213 command */ 227 command */
228#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
229 * or erase cmds. This is especially
230 * long because of the potential of
231 * multiple flash erases that can be
232 * spawned.
233 */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 10e89c6ae823..c7091ea29f3f 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.7" 21#define LPFC_DRIVER_VERSION "8.1.9"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 4675343228ad..8cd0bd1d0f7c 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -37,6 +37,12 @@
37#define LSI_MAX_CHANNELS 16 37#define LSI_MAX_CHANNELS 16
38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) 38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
39 39
40#define HBA_SIGNATURE_64_BIT 0x299
41#define PCI_CONF_AMISIG64 0xa4
42
43#define MEGA_SCSI_INQ_EVPD 1
44#define MEGA_INVALID_FIELD_IN_CDB 0x24
45
40 46
41/** 47/**
42 * scb_t - scsi command control block 48 * scb_t - scsi command control block
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index bdaee144a1c3..b8aa34202ec3 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -132,6 +132,10 @@ typedef struct uioc {
132/* Driver Data: */ 132/* Driver Data: */
133 void __user * user_data; 133 void __user * user_data;
134 uint32_t user_data_len; 134 uint32_t user_data_len;
135
136 /* 64bit alignment */
137 uint32_t pad_for_64bit_align;
138
135 mraid_passthru_t __user *user_pthru; 139 mraid_passthru_t __user *user_pthru;
136 140
137 mraid_passthru_t *pthru32; 141 mraid_passthru_t *pthru32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 92715130ac09..cd982c877da0 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.8 (Apr 11 2006) 13 * Version : v2.20.4.9 (Jul 16 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsil.com>
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter)
720 struct pci_dev *pdev; 720 struct pci_dev *pdev;
721 mraid_device_t *raid_dev; 721 mraid_device_t *raid_dev;
722 int i; 722 int i;
723 uint32_t magic64;
723 724
724 725
725 adapter->ito = MBOX_TIMEOUT; 726 adapter->ito = MBOX_TIMEOUT;
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter)
863 864
864 // Set the DMA mask to 64-bit. All supported controllers as capable of 865 // Set the DMA mask to 64-bit. All supported controllers as capable of
865 // DMA in this range 866 // DMA in this range
866 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { 867 pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
867 868
868 con_log(CL_ANN, (KERN_WARNING 869 if (((magic64 == HBA_SIGNATURE_64_BIT) &&
869 "megaraid: could not set DMA mask for 64-bit.\n")); 870 ((adapter->pdev->subsystem_device !=
871 PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
872 (adapter->pdev->subsystem_device !=
873 PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
874 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
875 adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
876 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
877 adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
878 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
879 adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
880 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
881 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
882 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
883 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
884 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
885 con_log(CL_ANN, (KERN_WARNING
886 "megaraid: DMA mask for 64-bit failed\n"));
870 887
871 goto out_free_sysfs_res; 888 if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
889 con_log(CL_ANN, (KERN_WARNING
890 "megaraid: 32-bit DMA mask failed\n"));
891 goto out_free_sysfs_res;
892 }
893 }
872 } 894 }
873 895
874 // setup tasklet for DPC 896 // setup tasklet for DPC
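
The DMA-mask change only asks for 64-bit addressing when the adapter advertises the 64-bit signature in config space (or is one of the whitelisted device IDs), and it now degrades to a 32-bit mask instead of failing the probe outright. Stripped of the long device-ID list, the pattern is:

    uint32_t magic64;

    pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);

    if (magic64 == HBA_SIGNATURE_64_BIT /* || whitelisted device IDs */) {
            if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
                    con_log(CL_ANN, (KERN_WARNING
                            "megaraid: DMA mask for 64-bit failed\n"));
                    if (pci_set_dma_mask(adapter->pdev, DMA_32BIT_MASK)) {
                            con_log(CL_ANN, (KERN_WARNING
                                    "megaraid: 32-bit DMA mask failed\n"));
                            goto out_free_sysfs_res;        /* give up on the probe */
                    }
            }
    }
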
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1622 rdev->last_disp |= (1L << SCP2CHANNEL(scp)); 1644 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1623 } 1645 }
1624 1646
1647 if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
1648 scp->sense_buffer[0] = 0x70;
1649 scp->sense_buffer[2] = ILLEGAL_REQUEST;
1650 scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
1651 scp->result = CHECK_CONDITION << 1;
1652 return NULL;
1653 }
1654
1625 /* Fall through */ 1655 /* Fall through */
1626 1656
1627 case READ_CAPACITY: 1657 case READ_CAPACITY:
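
The new INQUIRY check rejects vital-product-data requests by synthesizing a CHECK CONDITION with fixed-format sense data instead of sending the command to the firmware. The three sense bytes the diff sets have their conventional SCSI meanings:

    if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {            /* EVPD bit set in INQUIRY */
            scp->sense_buffer[0]  = 0x70;                /* fixed format, current error */
            scp->sense_buffer[2]  = ILLEGAL_REQUEST;     /* sense key */
            scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;  /* ASC 0x24 */
            scp->result = CHECK_CONDITION << 1;          /* status as the midlayer expects it */
            return NULL;                                 /* no mailbox command is built */
    }
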
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 868fb0ec93e7..2b5a3285f799 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.8" 24#define MEGARAID_VERSION "2.20.4.9"
25#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" 25#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)"
26 26
27 27
28/* 28/*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e8f534fb336b..d85b9a8f1b8d 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mm.c 12 * FILE : megaraid_mm.c
13 * Version : v2.20.2.6 (Mar 7 2005) 13 * Version : v2.20.2.7 (Jul 16 2006)
14 * 14 *
15 * Common management module 15 * Common management module
16 */ 16 */
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 3d9e67d6849d..c8762b2b8ed1 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -27,9 +27,9 @@
27#include "megaraid_ioctl.h" 27#include "megaraid_ioctl.h"
28 28
29 29
30#define LSI_COMMON_MOD_VERSION "2.20.2.6" 30#define LSI_COMMON_MOD_VERSION "2.20.2.7"
31#define LSI_COMMON_MOD_EXT_VERSION \ 31#define LSI_COMMON_MOD_EXT_VERSION \
32 "(Release Date: Mon Mar 7 00:01:03 EST 2005)" 32 "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
33 33
34 34
35#define LSI_DBGLVL dbglevel 35#define LSI_DBGLVL dbglevel
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index d1f38c32aa15..efc8fff1d250 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = {
183 { 183 {
184 .sht = &adma_ata_sht, 184 .sht = &adma_ata_sht,
185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, 186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING,
187 .pio_mask = 0x10, /* pio4 */ 188 .pio_mask = 0x10, /* pio4 */
188 .udma_mask = 0x1f, /* udma0-4 */ 189 .udma_mask = 0x1f, /* udma0-4 */
189 .port_ops = &adma_ata_ops, 190 .port_ops = &adma_ata_ops,
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 139ea0e27fd7..0930260aec2c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -487,6 +487,7 @@ typedef struct {
487#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ 487#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */
488#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ 488#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */
489 /* used. */ 489 /* used. */
490#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */
490#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ 491#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */
491#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ 492#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */
492#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ 493#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9758dba95542..859649160caa 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3063int 3063int
3064qla2x00_abort_isp(scsi_qla_host_t *ha) 3064qla2x00_abort_isp(scsi_qla_host_t *ha)
3065{ 3065{
3066 int rval;
3066 unsigned long flags = 0; 3067 unsigned long flags = 0;
3067 uint16_t cnt; 3068 uint16_t cnt;
3068 srb_t *sp; 3069 srb_t *sp;
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3119 3120
3120 ha->isp_abort_cnt = 0; 3121 ha->isp_abort_cnt = 0;
3121 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3122 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3123
3124 if (ha->eft) {
3125 rval = qla2x00_trace_control(ha, TC_ENABLE,
3126 ha->eft_dma, EFT_NUM_BUFFERS);
3127 if (rval) {
3128 qla_printk(KERN_WARNING, ha,
3129 "Unable to reinitialize EFT "
3130 "(%d).\n", rval);
3131 }
3132 }
3122 } else { /* failed the ISP abort */ 3133 } else { /* failed the ISP abort */
3123 ha->flags.online = 1; 3134 ha->flags.online = 1;
3124 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3135 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2b60a27eff0b..c5b3c610a32a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
471 mrk24->nport_handle = cpu_to_le16(loop_id); 471 mrk24->nport_handle = cpu_to_le16(loop_id);
472 mrk24->lun[1] = LSB(lun); 472 mrk24->lun[1] = LSB(lun);
473 mrk24->lun[2] = MSB(lun); 473 mrk24->lun[2] = MSB(lun);
474 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
474 } else { 475 } else {
475 SET_TARGET_ID(ha, mrk->target, loop_id); 476 SET_TARGET_ID(ha, mrk->target, loop_id);
476 mrk->lun = cpu_to_le16(lun); 477 mrk->lun = cpu_to_le16(lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 795bf15b1b8f..de0613135f70 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
589 break; 589 break;
590
591 case MBA_TRACE_NOTIFICATION:
592 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
593 ha->host_no, mb[1], mb[2]));
594 break;
590 } 595 }
591} 596}
592 597
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ec7ebb6037e6..65cbe2f5eea2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
744{ 744{
745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
747 srb_t *sp;
748 int ret; 747 int ret;
749 unsigned int id, lun; 748 unsigned int id, lun;
750 unsigned long serial; 749 unsigned long serial;
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
755 lun = cmd->device->lun; 754 lun = cmd->device->lun;
756 serial = cmd->serial_number; 755 serial = cmd->serial_number;
757 756
758 sp = (srb_t *) CMD_SP(cmd); 757 if (!fcport)
759 if (!sp || !fcport)
760 return ret; 758 return ret;
761 759
762 qla_printk(KERN_INFO, ha, 760 qla_printk(KERN_INFO, ha,
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
875{ 873{
876 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 874 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
877 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 875 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
878 srb_t *sp;
879 int ret; 876 int ret;
880 unsigned int id, lun; 877 unsigned int id, lun;
881 unsigned long serial; 878 unsigned long serial;
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
886 lun = cmd->device->lun; 883 lun = cmd->device->lun;
887 serial = cmd->serial_number; 884 serial = cmd->serial_number;
888 885
889 sp = (srb_t *) CMD_SP(cmd); 886 if (!fcport)
890 if (!sp || !fcport)
891 return ret; 887 return ret;
892 888
893 qla_printk(KERN_INFO, ha, 889 qla_printk(KERN_INFO, ha,
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
-	srb_t *sp;
 	int ret;
 	unsigned int id, lun;
 	unsigned long serial;
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	lun = cmd->device->lun;
 	serial = cmd->serial_number;
 
-	sp = (srb_t *) CMD_SP(cmd);
-	if (!sp || !fcport)
+	if (!fcport)
 		return ret;
 
 	qla_printk(KERN_INFO, ha,
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data)
 
 		next_loopid = 0;
 		list_for_each_entry(fcport, &ha->fcports, list) {
-			if (fcport->port_type != FCT_TARGET)
-				continue;
-
 			/*
 			 * If the port is not ONLINE then try to login
 			 * to it if we haven't run out of retries.
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d2d683440659..971259032ef7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION "8.01.05-k3"
+#define QLA2XXX_VERSION "8.01.07-k1"
 
 #define QLA_DRIVER_MAJOR_VER 8
 #define QLA_DRIVER_MINOR_VER 1
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
 #define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 03baec2191bf..01d40369a8a5 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -74,6 +74,7 @@ enum {
 static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static void vt6420_error_handler(struct ata_port *ap);
 
 static const struct pci_device_id svia_pci_tbl[] = {
 	{ 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
@@ -107,7 +108,38 @@ static struct scsi_host_template svia_sht = {
 	.bios_param = ata_std_bios_param,
 };
 
-static const struct ata_port_operations svia_sata_ops = {
+static const struct ata_port_operations vt6420_sata_ops = {
+	.port_disable = ata_port_disable,
+
+	.tf_load = ata_tf_load,
+	.tf_read = ata_tf_read,
+	.check_status = ata_check_status,
+	.exec_command = ata_exec_command,
+	.dev_select = ata_std_dev_select,
+
+	.bmdma_setup = ata_bmdma_setup,
+	.bmdma_start = ata_bmdma_start,
+	.bmdma_stop = ata_bmdma_stop,
+	.bmdma_status = ata_bmdma_status,
+
+	.qc_prep = ata_qc_prep,
+	.qc_issue = ata_qc_issue_prot,
+	.data_xfer = ata_pio_data_xfer,
+
+	.freeze = ata_bmdma_freeze,
+	.thaw = ata_bmdma_thaw,
+	.error_handler = vt6420_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.irq_handler = ata_interrupt,
+	.irq_clear = ata_bmdma_irq_clear,
+
+	.port_start = ata_port_start,
+	.port_stop = ata_port_stop,
+	.host_stop = ata_host_stop,
+};
+
+static const struct ata_port_operations vt6421_sata_ops = {
 	.port_disable = ata_port_disable,
 
 	.tf_load = ata_tf_load,
@@ -141,13 +173,13 @@ static const struct ata_port_operations svia_sata_ops = {
 	.host_stop = ata_host_stop,
 };
 
-static struct ata_port_info svia_port_info = {
+static struct ata_port_info vt6420_port_info = {
 	.sht = &svia_sht,
 	.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask = 0x1f,
 	.mwdma_mask = 0x07,
 	.udma_mask = 0x7f,
-	.port_ops = &svia_sata_ops,
+	.port_ops = &vt6420_sata_ops,
 };
 
 MODULE_AUTHOR("Jeff Garzik");
@@ -170,6 +202,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 	outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
 }
 
+/**
+ * vt6420_prereset - prereset for vt6420
+ * @ap: target ATA port
+ *
+ * SCR registers on vt6420 are pieces of shit and may hang the
+ * whole machine completely if accessed with the wrong timing.
+ * To avoid such catastrophe, vt6420 doesn't provide generic SCR
+ * access operations, but uses SStatus and SControl only during
+ * boot probing in controlled way.
+ *
+ * As the old (pre EH update) probing code is proven to work, we
+ * strictly follow the access pattern.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int vt6420_prereset(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	unsigned long timeout = jiffies + (HZ * 5);
+	u32 sstatus, scontrol;
+	int online;
+
+	/* don't do any SCR stuff if we're not loading */
+	if (!(ap->pflags & ATA_PFLAG_LOADING))
+		goto skip_scr;
+
+	/* Resume phy. This is the old resume sequence from
+	 * __sata_phy_reset().
+	 */
+	svia_scr_write(ap, SCR_CONTROL, 0x300);
+	svia_scr_read(ap, SCR_CONTROL); /* flush */
+
+	/* wait for phy to become ready, if necessary */
+	do {
+		msleep(200);
+		if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
+			break;
+	} while (time_before(jiffies, timeout));
+
+	/* open code sata_print_link_status() */
+	sstatus = svia_scr_read(ap, SCR_STATUS);
+	scontrol = svia_scr_read(ap, SCR_CONTROL);
+
+	online = (sstatus & 0xf) == 0x3;
+
+	ata_port_printk(ap, KERN_INFO,
+		"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
+		online ? "up" : "down", sstatus, scontrol);
+
+	/* SStatus is read one more time */
+	svia_scr_read(ap, SCR_STATUS);
+
+	if (!online) {
+		/* tell EH to bail */
+		ehc->i.action &= ~ATA_EH_RESET_MASK;
+		return 0;
+	}
+
+ skip_scr:
+	/* wait for !BSY */
+	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	return 0;
+}
+
+static void vt6420_error_handler(struct ata_port *ap)
+{
+	return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
+				  NULL, ata_std_postreset);
+}
+
 static const unsigned int svia_bar_sizes[] = {
 	8, 4, 8, 4, 16, 256
 };
@@ -210,7 +317,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
 static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
 {
 	struct ata_probe_ent *probe_ent;
-	struct ata_port_info *ppi = &svia_port_info;
+	struct ata_port_info *ppi = &vt6420_port_info;
 
 	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent)
@@ -239,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
 
 	probe_ent->sht = &svia_sht;
 	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
-	probe_ent->port_ops = &svia_sata_ops;
+	probe_ent->port_ops = &vt6421_sata_ops;
 	probe_ent->n_ports = N_PORTS;
 	probe_ent->irq = pdev->irq;
 	probe_ent->irq_flags = IRQF_SHARED;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6a5b731bd5ba..a8ed5a22009d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
  * Return value:
  *    SUCCESS or FAILED or NEEDS_RETRY
  **/
-static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense)
+static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
+			     int cmnd_size, int timeout, int copy_sense)
 {
 	struct scsi_device *sdev = scmd->device;
 	struct Scsi_Host *shost = sdev->host;
@@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense
 	old_cmd_len = scmd->cmd_len;
 	old_use_sg = scmd->use_sg;
 
+	memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+	memcpy(scmd->cmnd, cmnd, cmnd_size);
+
 	if (copy_sense) {
 		int gfp_mask = GFP_ATOMIC;
 
@@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
 	static unsigned char generic_sense[6] =
 		{REQUEST_SENSE, 0, 0, 0, 252, 0};
 
-	memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
-	return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
+	return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
 }
 
 /**
@@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
 	int retry_cnt = 1, rtn;
 
 retry_tur:
-	memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
-
-
-	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
+	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
 
 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
 		__FUNCTION__, scmd, rtn));
@@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
 	if (scmd->device->allow_restart) {
 		int rtn;
 
-		memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
-		rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0);
+		rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+					START_UNIT_TIMEOUT, 0);
 		if (rtn == SUCCESS)
 			return 0;
 	}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7b9e8fa1a4e0..2ecd14188574 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,6 +34,7 @@
 #define ISCSI_SESSION_ATTRS 11
 #define ISCSI_CONN_ATTRS 11
 #define ISCSI_HOST_ATTRS 0
+#define ISCSI_TRANSPORT_VERSION "1.1-646"
 
 struct iscsi_internal {
 	int daemon_pid;
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone)
 }
 
 static int
-iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb)
+iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp)
 {
 	unsigned long flags;
 	int rc;
 
 	skb_get(skb);
-	rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL);
+	rc = netlink_broadcast(nls, skb, 0, 1, gfp);
 	if (rc < 0) {
 		mempool_free(skb, zone->pool);
 		printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
 	ev->r.connerror.cid = conn->cid;
 	ev->r.connerror.sid = iscsi_conn_get_sid(conn);
 
-	iscsi_broadcast_skb(conn->z_error, skb);
+	iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC);
 
 	dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
 		   error);
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
 	 * this will occur if the daemon is not up, so we just warn
 	 * the user and when the daemon is restarted it will handle it
 	 */
-	rc = iscsi_broadcast_skb(conn->z_pdu, skb);
+	rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
 	if (rc < 0)
 		dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
 			   "session destruction event. Check iscsi daemon\n");
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
 	 * this will occur if the daemon is not up, so we just warn
 	 * the user and when the daemon is restarted it will handle it
 	 */
-	rc = iscsi_broadcast_skb(conn->z_pdu, skb);
+	rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
 	if (rc < 0)
 		dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
 			   "session creation event. Check iscsi daemon\n");
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void)
 {
 	int err;
 
+	printk(KERN_INFO "Loading iSCSI transport class v%s.",
+	       ISCSI_TRANSPORT_VERSION);
+
 	err = class_register(&iscsi_transport_class);
 	if (err)
 		return err;
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
1678 "Alex Aizman <itn780@yahoo.com>"); 1682 "Alex Aizman <itn780@yahoo.com>");
1679MODULE_DESCRIPTION("iSCSI Transport Interface"); 1683MODULE_DESCRIPTION("iSCSI Transport Interface");
1680MODULE_LICENSE("GPL"); 1684MODULE_LICENSE("GPL");
1685MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 65eef33846bb..34f9343ed0af 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
  *
  */
 
-static int sg_version_num = 30533;	/* 2 digits for each component */
-#define SG_VERSION_STR "3.5.33"
+static int sg_version_num = 30534;	/* 2 digits for each component */
+#define SG_VERSION_STR "3.5.34"
 
 /*
  * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
 
 #ifdef CONFIG_SCSI_PROC_FS
 #include <linux/proc_fs.h>
-static char *sg_version_date = "20050908";
+static char *sg_version_date = "20060818";
 
 static int sg_proc_init(void);
 static void sg_proc_cleanup(void);
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
 		len = vma->vm_end - sa;
 		len = (len < sg->length) ? len : sg->length;
 		if (offset < len) {
-			page = sg->page;
+			page = virt_to_page(page_address(sg->page) + offset);
 			get_page(page); /* increment page count */
 			break;
 		}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 8c505076c0eb..739d3ef46a40 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index dc673e1b6fd9..cfe20f730436 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -886,6 +886,15 @@ static int sunsab_console_setup(struct console *con, char *options)
 	unsigned long flags;
 	unsigned int baud, quot;
 
+	/*
+	 * The console framework calls us for each and every port
+	 * registered. Defer the console setup until the requested
+	 * port has been properly discovered. A bit of a hack,
+	 * though...
+	 */
+	if (up->port.type != PORT_SUNSAB)
+		return -1;
+
 	printk("Console: ttyS%d (SAB82532)\n",
 	       (sunsab_reg.minor - 64) + con->index);
 
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 47bc3d57e019..d34f336d53d8 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1146,6 +1146,9 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
 	unsigned long flags;
 	int baud, brg;
 
+	if (up->port.type != PORT_SUNZILOG)
+		return -1;
+
 	printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
 	       (sunzilog_reg.minor - 64) + con->index, con->index);
 
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index a4062a6adbb8..9c46746d5d00 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -208,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface,
 	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
-		dev_err(&dev->udev->dev, "Out of memory!\n");
+		dev_err(&interface->dev, "Out of memory!\n");
 		goto error;
 	}
 
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index efbbc0adb89a..65e4d046951a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -79,7 +79,6 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) },
 	{ USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) },
 	{ USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
-	{ USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) },
 	{ USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
 	{ USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
 	{ } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a692ac66ca6c..55195e76eb6f 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -82,10 +82,6 @@
 #define SPEEDDRAGON_VENDOR_ID 0x0e55
 #define SPEEDDRAGON_PRODUCT_ID 0x110b
 
-/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */
-#define OTI_VENDOR_ID 0x0ea0
-#define OTI_PRODUCT_ID 0x6858
-
 /* DATAPILOT Universal-2 Phone Cable */
 #define DATAPILOT_U2_VENDOR_ID 0x0731
 #define DATAPILOT_U2_PRODUCT_ID 0x2003
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fd158e063c06..4a803d69fa36 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1261,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
  * Tested on hardware version 1.10.
  * Entry is needed only for the initializer function override.
  */
-UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999,
+UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110,
 		"Desknote",
 		"UCR-61S2B",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
index b485bece5fc9..18ea4a549105 100644
--- a/drivers/video/imacfb.c
+++ b/drivers/video/imacfb.c
@@ -71,10 +71,10 @@ static int set_system(struct dmi_system_id *id)
 static struct dmi_system_id __initdata dmi_system_table[] = {
 	{ set_system, "iMac4,1", {
 		DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
-		DMI_MATCH(DMI_BIOS_VERSION,"iMac4,1") }, (void*)M_I17},
+		DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17},
 	{ set_system, "MacBookPro1,1", {
 		DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
-		DMI_MATCH(DMI_BIOS_VERSION,"MacBookPro1,1") }, (void*)M_I17},
+		DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17},
 	{ set_system, "MacBook1,1", {
 		DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
 		DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK},
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 440272ad10e7..7c76e079ca7d 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -331,7 +331,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
 		tmp |= M1064_XPIXCLKCTRL_PLL_UP;
 	}
 	matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp);
+#ifdef __powerpc__
+	/* This is necessary to avoid jitter on PowerPC
+	 * (OpenFirmware) systems, but apparently
+	 * introduces jitter, at least on a x86-64
+	 * using DVI.
+	 * A simple workaround is disable for non-PPC.
+	 */
 	matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0);
+#endif /* __powerpc__ */
 	matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl);
 
 	matroxfb_DAC_unlock_irqrestore(flags);