author     Linus Torvalds <torvalds@linux-foundation.org>    2011-03-26 00:06:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-03-26 00:06:13 -0400
commit     9f34217c846a96dea03f4418e2f27423658d3542 (patch)
tree       5b137af50db5758261700015911afb197ac8fc9f /drivers/scsi
parent     95e14ed7fc4b2db62eb597a70850a0fede48b78a (diff)
parent     3703b2c5d041a68095cdd22380c23ce27d449ad7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (55 commits)
[SCSI] tcm_loop: Add multi-fabric Linux/SCSI LLD fabric module
[SCSI] qla4xxx: Use polling mode for disable interrupt mailbox completion
[SCSI] Revert "[SCSI] Retrieve the Caching mode page"
[SCSI] bnx2fc: IO completion not processed due to missed wakeup
[SCSI] qla4xxx: Update driver version to 5.02.00-k6
[SCSI] qla4xxx: masking required bits of add_fw_options during initialization
[SCSI] qla4xxx: added new function qla4xxx_relogin_all_devices
[SCSI] qla4xxx: add support for ql4xsess_recovery_tmo cmd line param
[SCSI] qla4xxx: Add support for ql4xmaxqdepth command line parameter
[SCSI] qla4xxx: cleanup function qla4xxx_process_ddb_changed
[SCSI] qla4xxx: Prevent other port reinitialization during remove_adapter
[SCSI] qla4xxx: remove unused ddb flag DF_NO_RELOGIN
[SCSI] qla4xxx: cleanup DDB relogin logic during initialization
[SCSI] qla4xxx: Do not retry ISP82XX initialization if H/W state is failed
[SCSI] qla4xxx: Do not send mbox command if FW is in failed state
[SCSI] qla4xxx: cleanup qla4xxx_initialize_ddb_list()
[SCSI] ses: add subenclosure support
[SCSI] bnx2fc: Bump version to 1.0.1
[SCSI] bnx2fc: Remove unnecessary module state checks
[SCSI] bnx2fc: Fix MTU issue by using static MTU
...
Diffstat (limited to 'drivers/scsi')
47 files changed, 1855 insertions, 772 deletions
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 92df4d6b6147..1bd9fd18f7f3 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@ | |||
3 | obj-$(CONFIG_SCSI_AACRAID) := aacraid.o | 3 | obj-$(CONFIG_SCSI_AACRAID) := aacraid.o |
4 | 4 | ||
5 | aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ | 5 | aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ |
6 | dpcsup.o rx.o sa.o rkt.o nark.o | 6 | dpcsup.o rx.o sa.o rkt.o nark.o src.o |
7 | 7 | ||
8 | ccflags-y := -Idrivers/scsi | 8 | ccflags-y := -Idrivers/scsi |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7df2dd1d2c6f..118ce83a737c 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -1486,7 +1487,9 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1486 | dev->a_ops.adapter_write = aac_write_block; | 1487 | dev->a_ops.adapter_write = aac_write_block; |
1487 | } | 1488 | } |
1488 | dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; | 1489 | dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; |
1489 | if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { | 1490 | if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1) |
1491 | dev->adapter_info.options |= AAC_OPT_NEW_COMM; | ||
1492 | if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { | ||
1490 | /* | 1493 | /* |
1491 | * Worst case size that could cause sg overflow when | 1494 | * Worst case size that could cause sg overflow when |
1492 | * we break up SG elements that are larger than 64KB. | 1495 | * we break up SG elements that are larger than 64KB. |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4dbcc055ac78..29ab00016b78 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@ | |||
12 | *----------------------------------------------------------------------------*/ | 12 | *----------------------------------------------------------------------------*/ |
13 | 13 | ||
14 | #ifndef AAC_DRIVER_BUILD | 14 | #ifndef AAC_DRIVER_BUILD |
15 | # define AAC_DRIVER_BUILD 26400 | 15 | # define AAC_DRIVER_BUILD 28000 |
16 | # define AAC_DRIVER_BRANCH "-ms" | 16 | # define AAC_DRIVER_BRANCH "-ms" |
17 | #endif | 17 | #endif |
18 | #define MAXIMUM_NUM_CONTAINERS 32 | 18 | #define MAXIMUM_NUM_CONTAINERS 32 |
@@ -277,6 +277,16 @@ enum aac_queue_types { | |||
277 | 277 | ||
278 | #define FsaNormal 1 | 278 | #define FsaNormal 1 |
279 | 279 | ||
280 | /* transport FIB header (PMC) */ | ||
281 | struct aac_fib_xporthdr { | ||
282 | u64 HostAddress; /* FIB host address w/o xport header */ | ||
283 | u32 Size; /* FIB size excluding xport header */ | ||
284 | u32 Handle; /* driver handle to reference the FIB */ | ||
285 | u64 Reserved[2]; | ||
286 | }; | ||
287 | |||
288 | #define ALIGN32 32 | ||
289 | |||
280 | /* | 290 | /* |
281 | * Define the FIB. The FIB is the where all the requested data and | 291 | * Define the FIB. The FIB is the where all the requested data and |
282 | * command information are put to the application on the FSA adapter. | 292 | * command information are put to the application on the FSA adapter. |
@@ -394,7 +404,9 @@ enum fib_xfer_state { | |||
394 | AdapterMicroFib = (1<<17), | 404 | AdapterMicroFib = (1<<17), |
395 | BIOSFibPath = (1<<18), | 405 | BIOSFibPath = (1<<18), |
396 | FastResponseCapable = (1<<19), | 406 | FastResponseCapable = (1<<19), |
397 | ApiFib = (1<<20) // Its an API Fib. | 407 | ApiFib = (1<<20), /* Its an API Fib */ |
408 | /* PMC NEW COMM: There is no more AIF data pending */ | ||
409 | NoMoreAifDataAvailable = (1<<21) | ||
398 | }; | 410 | }; |
399 | 411 | ||
400 | /* | 412 | /* |
@@ -404,6 +416,7 @@ enum fib_xfer_state { | |||
404 | 416 | ||
405 | #define ADAPTER_INIT_STRUCT_REVISION 3 | 417 | #define ADAPTER_INIT_STRUCT_REVISION 3 |
406 | #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science | 418 | #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science |
419 | #define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ | ||
407 | 420 | ||
408 | struct aac_init | 421 | struct aac_init |
409 | { | 422 | { |
@@ -428,9 +441,15 @@ struct aac_init | |||
428 | #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 | 441 | #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 |
429 | #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 | 442 | #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 |
430 | #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 | 443 | #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 |
444 | #define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041 | ||
431 | __le32 MaxIoCommands; /* max outstanding commands */ | 445 | __le32 MaxIoCommands; /* max outstanding commands */ |
432 | __le32 MaxIoSize; /* largest I/O command */ | 446 | __le32 MaxIoSize; /* largest I/O command */ |
433 | __le32 MaxFibSize; /* largest FIB to adapter */ | 447 | __le32 MaxFibSize; /* largest FIB to adapter */ |
448 | /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ | ||
449 | __le32 MaxNumAif; /* max number of aif */ | ||
450 | /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ | ||
451 | __le32 HostRRQ_AddrLow; | ||
452 | __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ | ||
434 | }; | 453 | }; |
435 | 454 | ||
436 | enum aac_log_level { | 455 | enum aac_log_level { |
@@ -685,7 +704,7 @@ struct rx_inbound { | |||
685 | #define OutboundDoorbellReg MUnit.ODR | 704 | #define OutboundDoorbellReg MUnit.ODR |
686 | 705 | ||
687 | struct rx_registers { | 706 | struct rx_registers { |
688 | struct rx_mu_registers MUnit; /* 1300h - 1344h */ | 707 | struct rx_mu_registers MUnit; /* 1300h - 1347h */ |
689 | __le32 reserved1[2]; /* 1348h - 134ch */ | 708 | __le32 reserved1[2]; /* 1348h - 134ch */ |
690 | struct rx_inbound IndexRegs; | 709 | struct rx_inbound IndexRegs; |
691 | }; | 710 | }; |
@@ -703,7 +722,7 @@ struct rx_registers { | |||
703 | #define rkt_inbound rx_inbound | 722 | #define rkt_inbound rx_inbound |
704 | 723 | ||
705 | struct rkt_registers { | 724 | struct rkt_registers { |
706 | struct rkt_mu_registers MUnit; /* 1300h - 1344h */ | 725 | struct rkt_mu_registers MUnit; /* 1300h - 1347h */ |
707 | __le32 reserved1[1006]; /* 1348h - 22fch */ | 726 | __le32 reserved1[1006]; /* 1348h - 22fch */ |
708 | struct rkt_inbound IndexRegs; /* 2300h - */ | 727 | struct rkt_inbound IndexRegs; /* 2300h - */ |
709 | }; | 728 | }; |
@@ -713,6 +732,44 @@ struct rkt_registers { | |||
713 | #define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) | 732 | #define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) |
714 | #define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) | 733 | #define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) |
715 | 734 | ||
735 | /* | ||
736 | * PMC SRC message unit registers | ||
737 | */ | ||
738 | |||
739 | #define src_inbound rx_inbound | ||
740 | |||
741 | struct src_mu_registers { | ||
742 | /* PCI*| Name */ | ||
743 | __le32 reserved0[8]; /* 00h | Reserved */ | ||
744 | __le32 IDR; /* 20h | Inbound Doorbell Register */ | ||
745 | __le32 IISR; /* 24h | Inbound Int. Status Register */ | ||
746 | __le32 reserved1[3]; /* 28h | Reserved */ | ||
747 | __le32 OIMR; /* 34h | Outbound Int. Mask Register */ | ||
748 | __le32 reserved2[25]; /* 38h | Reserved */ | ||
749 | __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ | ||
750 | __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ | ||
751 | __le32 reserved3[6]; /* a4h | Reserved */ | ||
752 | __le32 OMR; /* bch | Outbound Message Register */ | ||
753 | __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ | ||
754 | __le32 IQ_H; /* c4h | Inbound Queue (High address) */ | ||
755 | }; | ||
756 | |||
757 | struct src_registers { | ||
758 | struct src_mu_registers MUnit; /* 00h - c7h */ | ||
759 | __le32 reserved1[130790]; /* c8h - 7fc5fh */ | ||
760 | struct src_inbound IndexRegs; /* 7fc60h */ | ||
761 | }; | ||
762 | |||
763 | #define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR)) | ||
764 | #define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR)) | ||
765 | #define src_writeb(AEP, CSR, value) writeb(value, \ | ||
766 | &((AEP)->regs.src.bar0->CSR)) | ||
767 | #define src_writel(AEP, CSR, value) writel(value, \ | ||
768 | &((AEP)->regs.src.bar0->CSR)) | ||
769 | |||
770 | #define SRC_ODR_SHIFT 12 | ||
771 | #define SRC_IDR_SHIFT 9 | ||
772 | |||
716 | typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); | 773 | typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); |
717 | 774 | ||
718 | struct aac_fib_context { | 775 | struct aac_fib_context { |
@@ -879,6 +936,7 @@ struct aac_supplement_adapter_info | |||
879 | #define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) | 936 | #define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) |
880 | #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) | 937 | #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) |
881 | #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) | 938 | #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) |
939 | #define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) | ||
882 | #define AAC_SIS_VERSION_V3 3 | 940 | #define AAC_SIS_VERSION_V3 3 |
883 | #define AAC_SIS_SLOT_UNKNOWN 0xFF | 941 | #define AAC_SIS_SLOT_UNKNOWN 0xFF |
884 | 942 | ||
@@ -940,6 +998,7 @@ struct aac_bus_info_response { | |||
940 | #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) | 998 | #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) |
941 | #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) | 999 | #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) |
942 | #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) | 1000 | #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) |
1001 | #define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) | ||
943 | 1002 | ||
944 | struct aac_dev | 1003 | struct aac_dev |
945 | { | 1004 | { |
@@ -952,6 +1011,7 @@ struct aac_dev | |||
952 | */ | 1011 | */ |
953 | unsigned max_fib_size; | 1012 | unsigned max_fib_size; |
954 | unsigned sg_tablesize; | 1013 | unsigned sg_tablesize; |
1014 | unsigned max_num_aif; | ||
955 | 1015 | ||
956 | /* | 1016 | /* |
957 | * Map for 128 fib objects (64k) | 1017 | * Map for 128 fib objects (64k) |
@@ -980,10 +1040,21 @@ struct aac_dev | |||
980 | struct adapter_ops a_ops; | 1040 | struct adapter_ops a_ops; |
981 | unsigned long fsrev; /* Main driver's revision number */ | 1041 | unsigned long fsrev; /* Main driver's revision number */ |
982 | 1042 | ||
983 | unsigned base_size; /* Size of mapped in region */ | 1043 | unsigned long dbg_base; /* address of UART |
1044 | * debug buffer */ | ||
1045 | |||
1046 | unsigned base_size, dbg_size; /* Size of | ||
1047 | * mapped in region */ | ||
1048 | |||
984 | struct aac_init *init; /* Holds initialization info to communicate with adapter */ | 1049 | struct aac_init *init; /* Holds initialization info to communicate with adapter */ |
985 | dma_addr_t init_pa; /* Holds physical address of the init struct */ | 1050 | dma_addr_t init_pa; /* Holds physical address of the init struct */ |
986 | 1051 | ||
1052 | u32 *host_rrq; /* response queue | ||
1053 | * if AAC_COMM_MESSAGE_TYPE1 */ | ||
1054 | |||
1055 | dma_addr_t host_rrq_pa; /* phys. address */ | ||
1056 | u32 host_rrq_idx; /* index into rrq buffer */ | ||
1057 | |||
987 | struct pci_dev *pdev; /* Our PCI interface */ | 1058 | struct pci_dev *pdev; /* Our PCI interface */ |
988 | void * printfbuf; /* pointer to buffer used for printf's from the adapter */ | 1059 | void * printfbuf; /* pointer to buffer used for printf's from the adapter */ |
989 | void * comm_addr; /* Base address of Comm area */ | 1060 | void * comm_addr; /* Base address of Comm area */ |
@@ -1003,14 +1074,20 @@ struct aac_dev | |||
1003 | */ | 1074 | */ |
1004 | #ifndef AAC_MIN_FOOTPRINT_SIZE | 1075 | #ifndef AAC_MIN_FOOTPRINT_SIZE |
1005 | # define AAC_MIN_FOOTPRINT_SIZE 8192 | 1076 | # define AAC_MIN_FOOTPRINT_SIZE 8192 |
1077 | # define AAC_MIN_SRC_BAR0_SIZE 0x400000 | ||
1078 | # define AAC_MIN_SRC_BAR1_SIZE 0x800 | ||
1006 | #endif | 1079 | #endif |
1007 | union | 1080 | union |
1008 | { | 1081 | { |
1009 | struct sa_registers __iomem *sa; | 1082 | struct sa_registers __iomem *sa; |
1010 | struct rx_registers __iomem *rx; | 1083 | struct rx_registers __iomem *rx; |
1011 | struct rkt_registers __iomem *rkt; | 1084 | struct rkt_registers __iomem *rkt; |
1085 | struct { | ||
1086 | struct src_registers __iomem *bar0; | ||
1087 | char __iomem *bar1; | ||
1088 | } src; | ||
1012 | } regs; | 1089 | } regs; |
1013 | volatile void __iomem *base; | 1090 | volatile void __iomem *base, *dbg_base_mapped; |
1014 | volatile struct rx_inbound __iomem *IndexRegs; | 1091 | volatile struct rx_inbound __iomem *IndexRegs; |
1015 | u32 OIMR; /* Mask Register Cache */ | 1092 | u32 OIMR; /* Mask Register Cache */ |
1016 | /* | 1093 | /* |
@@ -1031,9 +1108,8 @@ struct aac_dev | |||
1031 | u8 comm_interface; | 1108 | u8 comm_interface; |
1032 | # define AAC_COMM_PRODUCER 0 | 1109 | # define AAC_COMM_PRODUCER 0 |
1033 | # define AAC_COMM_MESSAGE 1 | 1110 | # define AAC_COMM_MESSAGE 1 |
1034 | /* macro side-effects BEWARE */ | 1111 | # define AAC_COMM_MESSAGE_TYPE1 3 |
1035 | # define raw_io_interface \ | 1112 | u8 raw_io_interface; |
1036 | init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) | ||
1037 | u8 raw_io_64; | 1113 | u8 raw_io_64; |
1038 | u8 printf_enabled; | 1114 | u8 printf_enabled; |
1039 | u8 in_reset; | 1115 | u8 in_reset; |
@@ -1789,6 +1865,10 @@ extern struct aac_common aac_config; | |||
1789 | #define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ | 1865 | #define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ |
1790 | #define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ | 1866 | #define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ |
1791 | #define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ | 1867 | #define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ |
1868 | #define DoorBellAifPending (1<<6) /* Adapter -> Host */ | ||
1869 | |||
1870 | /* PMC specific outbound doorbell bits */ | ||
1871 | #define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */ | ||
1792 | 1872 | ||
1793 | /* | 1873 | /* |
1794 | * For FIB communication, we need all of the following things | 1874 | * For FIB communication, we need all of the following things |
@@ -1831,6 +1911,9 @@ extern struct aac_common aac_config; | |||
1831 | #define AifReqAPIJobUpdate 109 /* Update a job report from the API */ | 1911 | #define AifReqAPIJobUpdate 109 /* Update a job report from the API */ |
1832 | #define AifReqAPIJobFinish 110 /* Finish a job from the API */ | 1912 | #define AifReqAPIJobFinish 110 /* Finish a job from the API */ |
1833 | 1913 | ||
1914 | /* PMC NEW COMM: Request the event data */ | ||
1915 | #define AifReqEvent 200 | ||
1916 | |||
1834 | /* | 1917 | /* |
1835 | * Adapter Initiated FIB command structures. Start with the adapter | 1918 | * Adapter Initiated FIB command structures. Start with the adapter |
1836 | * initiated FIBs that really come from the adapter, and get responded | 1919 | * initiated FIBs that really come from the adapter, and get responded |
@@ -1886,10 +1969,13 @@ int aac_rx_init(struct aac_dev *dev); | |||
1886 | int aac_rkt_init(struct aac_dev *dev); | 1969 | int aac_rkt_init(struct aac_dev *dev); |
1887 | int aac_nark_init(struct aac_dev *dev); | 1970 | int aac_nark_init(struct aac_dev *dev); |
1888 | int aac_sa_init(struct aac_dev *dev); | 1971 | int aac_sa_init(struct aac_dev *dev); |
1972 | int aac_src_init(struct aac_dev *dev); | ||
1889 | int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); | 1973 | int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); |
1890 | unsigned int aac_response_normal(struct aac_queue * q); | 1974 | unsigned int aac_response_normal(struct aac_queue * q); |
1891 | unsigned int aac_command_normal(struct aac_queue * q); | 1975 | unsigned int aac_command_normal(struct aac_queue * q); |
1892 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); | 1976 | unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, |
1977 | int isAif, int isFastResponse, | ||
1978 | struct hw_fib *aif_fib); | ||
1893 | int aac_reset_adapter(struct aac_dev * dev, int forced); | 1979 | int aac_reset_adapter(struct aac_dev * dev, int forced); |
1894 | int aac_check_health(struct aac_dev * dev); | 1980 | int aac_check_health(struct aac_dev * dev); |
1895 | int aac_command_thread(void *data); | 1981 | int aac_command_thread(void *data); |
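The header additions above introduce the host response ring (host_rrq, host_rrq_idx) and the SRC doorbell shift constants used by the new PMC backend. Below is a minimal sketch of how one ring entry is interpreted, following the bit layout used by the SRC interrupt handler added later in this patch (bit 31 is a producer toggle, bit 30 flags a fast response, the low 16 bits carry a 1-based FIB handle); the helper is illustrative only and is not code from the commit.

#include <stdint.h>

/* Sketch: decode one host_rrq[] entry the way the SRC handler does.
 * Returns 1 and fills *fib_index/*is_fast when the slot holds a response,
 * 0 when the slot is empty (handle of 0).
 */
static int decode_host_rrq_entry(uint32_t entry, uint32_t *fib_index, int *is_fast)
{
	uint32_t handle = entry & 0x7fffffff;	/* strip the toggle bit (31) */

	*is_fast = (handle & 0x40000000) != 0;	/* fast-response bit (30) */
	handle &= 0x0000ffff;			/* 1-based FIB handle */
	if (handle == 0)
		return 0;
	*fib_index = handle - 1;		/* index into dev->fibs[] */
	return 1;
}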
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 645ddd9d9b9e..8a0b33033177 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index a7261486ccd4..7ac8fdb5577b 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -52,12 +53,16 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
52 | unsigned long size, align; | 53 | unsigned long size, align; |
53 | const unsigned long fibsize = 4096; | 54 | const unsigned long fibsize = 4096; |
54 | const unsigned long printfbufsiz = 256; | 55 | const unsigned long printfbufsiz = 256; |
56 | unsigned long host_rrq_size = 0; | ||
55 | struct aac_init *init; | 57 | struct aac_init *init; |
56 | dma_addr_t phys; | 58 | dma_addr_t phys; |
57 | unsigned long aac_max_hostphysmempages; | 59 | unsigned long aac_max_hostphysmempages; |
58 | 60 | ||
59 | size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz; | 61 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) |
60 | 62 | host_rrq_size = (dev->scsi_host_ptr->can_queue | |
63 | + AAC_NUM_MGT_FIB) * sizeof(u32); | ||
64 | size = fibsize + sizeof(struct aac_init) + commsize + | ||
65 | commalign + printfbufsiz + host_rrq_size; | ||
61 | 66 | ||
62 | base = pci_alloc_consistent(dev->pdev, size, &phys); | 67 | base = pci_alloc_consistent(dev->pdev, size, &phys); |
63 | 68 | ||
@@ -70,8 +75,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
70 | dev->comm_phys = phys; | 75 | dev->comm_phys = phys; |
71 | dev->comm_size = size; | 76 | dev->comm_size = size; |
72 | 77 | ||
73 | dev->init = (struct aac_init *)(base + fibsize); | 78 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { |
74 | dev->init_pa = phys + fibsize; | 79 | dev->host_rrq = (u32 *)(base + fibsize); |
80 | dev->host_rrq_pa = phys + fibsize; | ||
81 | memset(dev->host_rrq, 0, host_rrq_size); | ||
82 | } | ||
83 | |||
84 | dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); | ||
85 | dev->init_pa = phys + fibsize + host_rrq_size; | ||
75 | 86 | ||
76 | init = dev->init; | 87 | init = dev->init; |
77 | 88 | ||
@@ -106,8 +117,13 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
106 | 117 | ||
107 | init->InitFlags = 0; | 118 | init->InitFlags = 0; |
108 | if (dev->comm_interface == AAC_COMM_MESSAGE) { | 119 | if (dev->comm_interface == AAC_COMM_MESSAGE) { |
109 | init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); | 120 | init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); |
110 | dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); | 121 | dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); |
122 | } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { | ||
123 | init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); | ||
124 | init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED); | ||
125 | dprintk((KERN_WARNING | ||
126 | "aacraid: New Comm Interface type1 enabled\n")); | ||
111 | } | 127 | } |
112 | init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | | 128 | init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | |
113 | INITFLAGS_DRIVER_SUPPORTS_PM); | 129 | INITFLAGS_DRIVER_SUPPORTS_PM); |
@@ -115,11 +131,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
115 | init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); | 131 | init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); |
116 | init->MaxFibSize = cpu_to_le32(dev->max_fib_size); | 132 | init->MaxFibSize = cpu_to_le32(dev->max_fib_size); |
117 | 133 | ||
134 | init->MaxNumAif = cpu_to_le32(dev->max_num_aif); | ||
135 | init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32); | ||
136 | init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff); | ||
137 | |||
138 | |||
118 | /* | 139 | /* |
119 | * Increment the base address by the amount already used | 140 | * Increment the base address by the amount already used |
120 | */ | 141 | */ |
121 | base = base + fibsize + sizeof(struct aac_init); | 142 | base = base + fibsize + host_rrq_size + sizeof(struct aac_init); |
122 | phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init)); | 143 | phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + |
144 | sizeof(struct aac_init)); | ||
145 | |||
123 | /* | 146 | /* |
124 | * Align the beginning of Headers to commalign | 147 | * Align the beginning of Headers to commalign |
125 | */ | 148 | */ |
@@ -314,15 +337,22 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
314 | - sizeof(struct aac_write) + sizeof(struct sgentry)) | 337 | - sizeof(struct aac_write) + sizeof(struct sgentry)) |
315 | / sizeof(struct sgentry); | 338 | / sizeof(struct sgentry); |
316 | dev->comm_interface = AAC_COMM_PRODUCER; | 339 | dev->comm_interface = AAC_COMM_PRODUCER; |
317 | dev->raw_io_64 = 0; | 340 | dev->raw_io_interface = dev->raw_io_64 = 0; |
341 | |||
318 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, | 342 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, |
319 | 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && | 343 | 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && |
320 | (status[0] == 0x00000001)) { | 344 | (status[0] == 0x00000001)) { |
321 | if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) | 345 | if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) |
322 | dev->raw_io_64 = 1; | 346 | dev->raw_io_64 = 1; |
323 | if (dev->a_ops.adapter_comm && | 347 | if (dev->a_ops.adapter_comm) { |
324 | (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) | 348 | if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) { |
325 | dev->comm_interface = AAC_COMM_MESSAGE; | 349 | dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; |
350 | dev->raw_io_interface = 1; | ||
351 | } else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) { | ||
352 | dev->comm_interface = AAC_COMM_MESSAGE; | ||
353 | dev->raw_io_interface = 1; | ||
354 | } | ||
355 | } | ||
326 | if ((dev->comm_interface == AAC_COMM_MESSAGE) && | 356 | if ((dev->comm_interface == AAC_COMM_MESSAGE) && |
327 | (status[2] > dev->base_size)) { | 357 | (status[2] > dev->base_size)) { |
328 | aac_adapter_ioremap(dev, 0); | 358 | aac_adapter_ioremap(dev, 0); |
@@ -350,10 +380,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
350 | * status[3] & 0xFFFF maximum number FIBs outstanding | 380 | * status[3] & 0xFFFF maximum number FIBs outstanding |
351 | */ | 381 | */ |
352 | host->max_sectors = (status[1] >> 16) << 1; | 382 | host->max_sectors = (status[1] >> 16) << 1; |
353 | dev->max_fib_size = status[1] & 0xFFFF; | 383 | /* Multiple of 32 for PMC */ |
384 | dev->max_fib_size = status[1] & 0xFFE0; | ||
354 | host->sg_tablesize = status[2] >> 16; | 385 | host->sg_tablesize = status[2] >> 16; |
355 | dev->sg_tablesize = status[2] & 0xFFFF; | 386 | dev->sg_tablesize = status[2] & 0xFFFF; |
356 | host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; | 387 | host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; |
388 | dev->max_num_aif = status[4] & 0xFFFF; | ||
357 | /* | 389 | /* |
358 | * NOTE: | 390 | * NOTE: |
359 | * All these overrides are based on a fixed internal | 391 | * All these overrides are based on a fixed internal |
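A small sketch of the comm-area layout that aac_alloc_comm() builds above when the TYPE1 (SRC) interface is active: the host RRQ is carved out of the same DMA allocation, between the scratch FIB and struct aac_init. The function below only restates that offset arithmetic with the sizes passed in as parameters; it is not part of the commit.

#include <stddef.h>

/* Sketch of the single DMA region partitioned by aac_alloc_comm():
 *   [0, fibsize)              scratch FIB (4096 bytes)
 *   [fibsize, fibsize + rrq)  host RRQ, (can_queue + AAC_NUM_MGT_FIB) * sizeof(u32),
 *                             present only for AAC_COMM_MESSAGE_TYPE1
 *   [fibsize + rrq, ...)      struct aac_init, then the comm headers
 *                             (rounded up to commalign) and the printf buffer
 */
struct comm_offsets {
	size_t rrq;	/* offset of the host RRQ */
	size_t init;	/* offset of struct aac_init */
	size_t headers;	/* start of the (not yet aligned) comm headers */
};

static struct comm_offsets comm_area_offsets(size_t fibsize, size_t rrq_size,
					      size_t init_size)
{
	struct comm_offsets o;

	o.rrq = fibsize;
	o.init = fibsize + rrq_size;
	o.headers = o.init + init_size;
	return o;
}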
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 060ac4bd5a14..dd7ad3ba2dad 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -63,9 +64,11 @@ static int fib_map_alloc(struct aac_dev *dev) | |||
63 | "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", | 64 | "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", |
64 | dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, | 65 | dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, |
65 | AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); | 66 | AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); |
66 | if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size | 67 | dev->hw_fib_va = pci_alloc_consistent(dev->pdev, |
67 | * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), | 68 | (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) |
68 | &dev->hw_fib_pa))==NULL) | 69 | * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), |
70 | &dev->hw_fib_pa); | ||
71 | if (dev->hw_fib_va == NULL) | ||
69 | return -ENOMEM; | 72 | return -ENOMEM; |
70 | return 0; | 73 | return 0; |
71 | } | 74 | } |
@@ -110,9 +113,22 @@ int aac_fib_setup(struct aac_dev * dev) | |||
110 | if (i<0) | 113 | if (i<0) |
111 | return -ENOMEM; | 114 | return -ENOMEM; |
112 | 115 | ||
116 | /* 32 byte alignment for PMC */ | ||
117 | hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); | ||
118 | dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + | ||
119 | (hw_fib_pa - dev->hw_fib_pa)); | ||
120 | dev->hw_fib_pa = hw_fib_pa; | ||
121 | memset(dev->hw_fib_va, 0, | ||
122 | (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) * | ||
123 | (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); | ||
124 | |||
125 | /* add Xport header */ | ||
126 | dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + | ||
127 | sizeof(struct aac_fib_xporthdr)); | ||
128 | dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); | ||
129 | |||
113 | hw_fib = dev->hw_fib_va; | 130 | hw_fib = dev->hw_fib_va; |
114 | hw_fib_pa = dev->hw_fib_pa; | 131 | hw_fib_pa = dev->hw_fib_pa; |
115 | memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); | ||
116 | /* | 132 | /* |
117 | * Initialise the fibs | 133 | * Initialise the fibs |
118 | */ | 134 | */ |
@@ -129,8 +145,10 @@ int aac_fib_setup(struct aac_dev * dev) | |||
129 | hw_fib->header.XferState = cpu_to_le32(0xffffffff); | 145 | hw_fib->header.XferState = cpu_to_le32(0xffffffff); |
130 | hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); | 146 | hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); |
131 | fibptr->hw_fib_pa = hw_fib_pa; | 147 | fibptr->hw_fib_pa = hw_fib_pa; |
132 | hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size); | 148 | hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + |
133 | hw_fib_pa = hw_fib_pa + dev->max_fib_size; | 149 | dev->max_fib_size + sizeof(struct aac_fib_xporthdr)); |
150 | hw_fib_pa = hw_fib_pa + | ||
151 | dev->max_fib_size + sizeof(struct aac_fib_xporthdr); | ||
134 | } | 152 | } |
135 | /* | 153 | /* |
136 | * Add the fib chain to the free list | 154 | * Add the fib chain to the free list |
@@ -664,9 +682,14 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) | |||
664 | unsigned long nointr = 0; | 682 | unsigned long nointr = 0; |
665 | unsigned long qflags; | 683 | unsigned long qflags; |
666 | 684 | ||
685 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { | ||
686 | kfree(hw_fib); | ||
687 | return 0; | ||
688 | } | ||
689 | |||
667 | if (hw_fib->header.XferState == 0) { | 690 | if (hw_fib->header.XferState == 0) { |
668 | if (dev->comm_interface == AAC_COMM_MESSAGE) | 691 | if (dev->comm_interface == AAC_COMM_MESSAGE) |
669 | kfree (hw_fib); | 692 | kfree(hw_fib); |
670 | return 0; | 693 | return 0; |
671 | } | 694 | } |
672 | /* | 695 | /* |
@@ -674,7 +697,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) | |||
674 | */ | 697 | */ |
675 | if (hw_fib->header.StructType != FIB_MAGIC) { | 698 | if (hw_fib->header.StructType != FIB_MAGIC) { |
676 | if (dev->comm_interface == AAC_COMM_MESSAGE) | 699 | if (dev->comm_interface == AAC_COMM_MESSAGE) |
677 | kfree (hw_fib); | 700 | kfree(hw_fib); |
678 | return -EINVAL; | 701 | return -EINVAL; |
679 | } | 702 | } |
680 | /* | 703 | /* |
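The fib_map_alloc()/aac_fib_setup() hunks above over-allocate the FIB pool by ALIGN32 - 1 bytes, round the base up to a 32-byte boundary, and prepend a struct aac_fib_xporthdr to every FIB for the PMC transport. A minimal sketch of the resulting DMA address of a given FIB slot, with the sizes supplied as parameters instead of read from the driver structures; the helper name is hypothetical.

#include <stdint.h>
#include <stddef.h>

#define ALIGN32 32

/* Sketch: DMA address of FIB slot 'slot' under the layout set up above.
 * The base is rounded up to 32 bytes, every slot is one transport header
 * followed by one FIB, and the driver hands out the address *after* the
 * transport header.
 */
static uint64_t fib_dma_address(uint64_t pool_pa, size_t xporthdr_size,
				size_t max_fib_size, unsigned int slot)
{
	uint64_t aligned = (pool_pa + (ALIGN32 - 1)) & ~(uint64_t)(ALIGN32 - 1);
	size_t stride = max_fib_size + xporthdr_size;

	return aligned + xporthdr_size + (uint64_t)slot * stride;
}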
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 9c7408fe8c7d..f0c66a80ad13 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -228,6 +229,48 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
228 | return 0; | 229 | return 0; |
229 | } | 230 | } |
230 | 231 | ||
232 | /* | ||
233 | * | ||
234 | * aac_aif_callback | ||
235 | * @context: the context set in the fib - here it is scsi cmd | ||
236 | * @fibptr: pointer to the fib | ||
237 | * | ||
238 | * Handles the AIFs - new method (SRC) | ||
239 | * | ||
240 | */ | ||
241 | |||
242 | static void aac_aif_callback(void *context, struct fib * fibptr) | ||
243 | { | ||
244 | struct fib *fibctx; | ||
245 | struct aac_dev *dev; | ||
246 | struct aac_aifcmd *cmd; | ||
247 | int status; | ||
248 | |||
249 | fibctx = (struct fib *)context; | ||
250 | BUG_ON(fibptr == NULL); | ||
251 | dev = fibptr->dev; | ||
252 | |||
253 | if (fibptr->hw_fib_va->header.XferState & | ||
254 | cpu_to_le32(NoMoreAifDataAvailable)) { | ||
255 | aac_fib_complete(fibptr); | ||
256 | aac_fib_free(fibptr); | ||
257 | return; | ||
258 | } | ||
259 | |||
260 | aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); | ||
261 | |||
262 | aac_fib_init(fibctx); | ||
263 | cmd = (struct aac_aifcmd *) fib_data(fibctx); | ||
264 | cmd->command = cpu_to_le32(AifReqEvent); | ||
265 | |||
266 | status = aac_fib_send(AifRequest, | ||
267 | fibctx, | ||
268 | sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), | ||
269 | FsaNormal, | ||
270 | 0, 1, | ||
271 | (fib_callback)aac_aif_callback, fibctx); | ||
272 | } | ||
273 | |||
231 | 274 | ||
232 | /** | 275 | /** |
233 | * aac_intr_normal - Handle command replies | 276 | * aac_intr_normal - Handle command replies |
@@ -238,19 +281,17 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
238 | * know there is a response on our normal priority queue. We will pull off | 281 | * know there is a response on our normal priority queue. We will pull off |
239 | * all QE there are and wake up all the waiters before exiting. | 282 | * all QE there are and wake up all the waiters before exiting. |
240 | */ | 283 | */ |
241 | 284 | unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, | |
242 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | 285 | int isAif, int isFastResponse, struct hw_fib *aif_fib) |
243 | { | 286 | { |
244 | unsigned long mflags; | 287 | unsigned long mflags; |
245 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); | 288 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); |
246 | if ((index & 0x00000002L)) { | 289 | if (isAif == 1) { /* AIF - common */ |
247 | struct hw_fib * hw_fib; | 290 | struct hw_fib * hw_fib; |
248 | struct fib * fib; | 291 | struct fib * fib; |
249 | struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; | 292 | struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; |
250 | unsigned long flags; | 293 | unsigned long flags; |
251 | 294 | ||
252 | if (index == 0xFFFFFFFEL) /* Special Case */ | ||
253 | return 0; /* Do nothing */ | ||
254 | /* | 295 | /* |
255 | * Allocate a FIB. For non queued stuff we can just use | 296 | * Allocate a FIB. For non queued stuff we can just use |
256 | * the stack so we are happy. We need a fib object in order to | 297 | * the stack so we are happy. We need a fib object in order to |
@@ -263,8 +304,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
263 | kfree (fib); | 304 | kfree (fib); |
264 | return 1; | 305 | return 1; |
265 | } | 306 | } |
266 | memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + | 307 | if (aif_fib != NULL) { |
267 | (index & ~0x00000002L)), sizeof(struct hw_fib)); | 308 | memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); |
309 | } else { | ||
310 | memcpy(hw_fib, | ||
311 | (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + | ||
312 | index), sizeof(struct hw_fib)); | ||
313 | } | ||
268 | INIT_LIST_HEAD(&fib->fiblink); | 314 | INIT_LIST_HEAD(&fib->fiblink); |
269 | fib->type = FSAFS_NTC_FIB_CONTEXT; | 315 | fib->type = FSAFS_NTC_FIB_CONTEXT; |
270 | fib->size = sizeof(struct fib); | 316 | fib->size = sizeof(struct fib); |
@@ -277,9 +323,26 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
277 | wake_up_interruptible(&q->cmdready); | 323 | wake_up_interruptible(&q->cmdready); |
278 | spin_unlock_irqrestore(q->lock, flags); | 324 | spin_unlock_irqrestore(q->lock, flags); |
279 | return 1; | 325 | return 1; |
326 | } else if (isAif == 2) { /* AIF - new (SRC) */ | ||
327 | struct fib *fibctx; | ||
328 | struct aac_aifcmd *cmd; | ||
329 | |||
330 | fibctx = aac_fib_alloc(dev); | ||
331 | if (!fibctx) | ||
332 | return 1; | ||
333 | aac_fib_init(fibctx); | ||
334 | |||
335 | cmd = (struct aac_aifcmd *) fib_data(fibctx); | ||
336 | cmd->command = cpu_to_le32(AifReqEvent); | ||
337 | |||
338 | return aac_fib_send(AifRequest, | ||
339 | fibctx, | ||
340 | sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), | ||
341 | FsaNormal, | ||
342 | 0, 1, | ||
343 | (fib_callback)aac_aif_callback, fibctx); | ||
280 | } else { | 344 | } else { |
281 | int fast = index & 0x01; | 345 | struct fib *fib = &dev->fibs[index]; |
282 | struct fib * fib = &dev->fibs[index >> 2]; | ||
283 | struct hw_fib * hwfib = fib->hw_fib_va; | 346 | struct hw_fib * hwfib = fib->hw_fib_va; |
284 | 347 | ||
285 | /* | 348 | /* |
@@ -298,7 +361,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
298 | return 0; | 361 | return 0; |
299 | } | 362 | } |
300 | 363 | ||
301 | if (fast) { | 364 | if (isFastResponse) { |
302 | /* | 365 | /* |
303 | * Doctor the fib | 366 | * Doctor the fib |
304 | */ | 367 | */ |
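With the rework above, callers classify the interrupt before invoking aac_intr_normal() instead of packing flag bits into the index. The sketch below is only an illustrative caller showing the three argument patterns that appear in this series (it assumes the kernel types from aacraid.h); it is not code from the commit.

#include "aacraid.h"

/* Illustrative caller of the reworked aac_intr_normal():
 *   isAif == 0: 'index' selects dev->fibs[index] directly; isFastResponse
 *               tells the handler to doctor the FIB instead of reading it back.
 *   isAif == 1: common AIF path; the AIF is copied from 'aif_fib' when given,
 *               otherwise from the adapter window at 'index'.
 *   isAif == 2: new SRC-style AIF; the handler sends an AifReqEvent FIB and
 *               keeps the exchange alive through aac_aif_callback().
 */
static void intr_normal_usage(struct aac_dev *dev, u32 index, int fast,
			      struct hw_fib *aif_fib)
{
	aac_intr_normal(dev, index, 0, fast, NULL);	/* normal completion */
	aac_intr_normal(dev, 0, 1, 0, aif_fib);		/* AIF delivered in a hw_fib */
	aac_intr_normal(dev, 0, 2, 0, NULL);		/* SRC AIF-pending doorbell */
}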
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2c93d9496d62..4ff26521d75f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -54,7 +55,7 @@ | |||
54 | 55 | ||
55 | #include "aacraid.h" | 56 | #include "aacraid.h" |
56 | 57 | ||
57 | #define AAC_DRIVER_VERSION "1.1-5" | 58 | #define AAC_DRIVER_VERSION "1.1-7" |
58 | #ifndef AAC_DRIVER_BRANCH | 59 | #ifndef AAC_DRIVER_BRANCH |
59 | #define AAC_DRIVER_BRANCH "" | 60 | #define AAC_DRIVER_BRANCH "" |
60 | #endif | 61 | #endif |
@@ -161,6 +162,7 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = { | |||
161 | { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ | 162 | { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ |
162 | { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ | 163 | { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ |
163 | { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ | 164 | { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ |
165 | { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */ | ||
164 | { 0,} | 166 | { 0,} |
165 | }; | 167 | }; |
166 | MODULE_DEVICE_TABLE(pci, aac_pci_tbl); | 168 | MODULE_DEVICE_TABLE(pci, aac_pci_tbl); |
@@ -235,7 +237,8 @@ static struct aac_driver_ident aac_drivers[] = { | |||
235 | { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ | 237 | { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ |
236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ | 238 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ |
237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ | 239 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ |
238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */ | 240 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ |
241 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */ | ||
239 | }; | 242 | }; |
240 | 243 | ||
241 | /** | 244 | /** |
@@ -653,8 +656,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) | |||
653 | * This adapter needs a blind reset, only do so for Adapters that | 656 | * This adapter needs a blind reset, only do so for Adapters that |
654 | * support a register, instead of a commanded, reset. | 657 | * support a register, instead of a commanded, reset. |
655 | */ | 658 | */ |
656 | if ((aac->supplement_adapter_info.SupportedOptions2 & | 659 | if (((aac->supplement_adapter_info.SupportedOptions2 & |
657 | AAC_OPTION_MU_RESET) && | 660 | AAC_OPTION_MU_RESET) || |
661 | (aac->supplement_adapter_info.SupportedOptions2 & | ||
662 | AAC_OPTION_DOORBELL_RESET)) && | ||
658 | aac_check_reset && | 663 | aac_check_reset && |
659 | ((aac_check_reset != 1) || | 664 | ((aac_check_reset != 1) || |
660 | !(aac->supplement_adapter_info.SupportedOptions2 & | 665 | !(aac->supplement_adapter_info.SupportedOptions2 & |
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c55f7c862f0e..f397d21a0c06 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -4,7 +4,8 @@ | |||
4 | * based on the old aacraid driver that is.. | 4 | * based on the old aacraid driver that is.. |
5 | * Adaptec aacraid device driver for Linux. | 5 | * Adaptec aacraid device driver for Linux. |
6 | * | 6 | * |
7 | * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com) | 7 | * Copyright (c) 2000-2010 Adaptec, Inc. |
8 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 16d8db550027..be44de92429a 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 84d77fd86e5b..ce530f113fdb 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -84,15 +85,35 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) | |||
84 | 85 | ||
85 | static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) | 86 | static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) |
86 | { | 87 | { |
88 | int isAif, isFastResponse, isSpecial; | ||
87 | struct aac_dev *dev = dev_id; | 89 | struct aac_dev *dev = dev_id; |
88 | u32 Index = rx_readl(dev, MUnit.OutboundQueue); | 90 | u32 Index = rx_readl(dev, MUnit.OutboundQueue); |
89 | if (unlikely(Index == 0xFFFFFFFFL)) | 91 | if (unlikely(Index == 0xFFFFFFFFL)) |
90 | Index = rx_readl(dev, MUnit.OutboundQueue); | 92 | Index = rx_readl(dev, MUnit.OutboundQueue); |
91 | if (likely(Index != 0xFFFFFFFFL)) { | 93 | if (likely(Index != 0xFFFFFFFFL)) { |
92 | do { | 94 | do { |
93 | if (unlikely(aac_intr_normal(dev, Index))) { | 95 | isAif = isFastResponse = isSpecial = 0; |
94 | rx_writel(dev, MUnit.OutboundQueue, Index); | 96 | if (Index & 0x00000002L) { |
95 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); | 97 | isAif = 1; |
98 | if (Index == 0xFFFFFFFEL) | ||
99 | isSpecial = 1; | ||
100 | Index &= ~0x00000002L; | ||
101 | } else { | ||
102 | if (Index & 0x00000001L) | ||
103 | isFastResponse = 1; | ||
104 | Index >>= 2; | ||
105 | } | ||
106 | if (!isSpecial) { | ||
107 | if (unlikely(aac_intr_normal(dev, | ||
108 | Index, isAif, | ||
109 | isFastResponse, NULL))) { | ||
110 | rx_writel(dev, | ||
111 | MUnit.OutboundQueue, | ||
112 | Index); | ||
113 | rx_writel(dev, | ||
114 | MUnit.ODR, | ||
115 | DoorBellAdapterNormRespReady); | ||
116 | } | ||
96 | } | 117 | } |
97 | Index = rx_readl(dev, MUnit.OutboundQueue); | 118 | Index = rx_readl(dev, MUnit.OutboundQueue); |
98 | } while (Index != 0xFFFFFFFFL); | 119 | } while (Index != 0xFFFFFFFFL); |
@@ -631,6 +652,10 @@ int _aac_rx_init(struct aac_dev *dev) | |||
631 | name, instance); | 652 | name, instance); |
632 | goto error_iounmap; | 653 | goto error_iounmap; |
633 | } | 654 | } |
655 | dev->dbg_base = dev->scsi_host_ptr->base; | ||
656 | dev->dbg_base_mapped = dev->base; | ||
657 | dev->dbg_size = dev->base_size; | ||
658 | |||
634 | aac_adapter_enable_int(dev); | 659 | aac_adapter_enable_int(dev); |
635 | /* | 660 | /* |
636 | * Tell the adapter that all is configured, and it can | 661 | * Tell the adapter that all is configured, and it can |
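The rewritten rx interrupt handler above now unpacks the flag bits from the outbound-queue value itself before handing a clean index to aac_intr_normal(). A minimal decoding sketch of that encoding (bit 1 set means AIF, the value 0xFFFFFFFE is a special no-op marker, otherwise bit 0 flags a fast response and the FIB index occupies the remaining bits); the function is illustrative only.

#include <stdint.h>

/* Sketch: decode the legacy rx/rkt MUnit.OutboundQueue word, mirroring the
 * branch structure added above.
 */
static void decode_outbound_index(uint32_t raw, int *is_aif, int *is_special,
				  int *is_fast, uint32_t *index)
{
	*is_aif = *is_special = *is_fast = 0;
	*index = raw;

	if (raw & 0x00000002UL) {		/* AIF entries have bit 1 set */
		*is_aif = 1;
		if (raw == 0xFFFFFFFEUL)	/* special marker: nothing to do */
			*is_special = 1;
		*index = raw & ~0x00000002UL;
	} else {
		if (raw & 0x00000001UL)		/* bit 0 flags a fast response */
			*is_fast = 1;
		*index = raw >> 2;		/* remaining bits: FIB index */
	}
}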
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 622c21c68e65..e5d4457121ea 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -391,6 +392,10 @@ int aac_sa_init(struct aac_dev *dev) | |||
391 | name, instance); | 392 | name, instance); |
392 | goto error_iounmap; | 393 | goto error_iounmap; |
393 | } | 394 | } |
395 | dev->dbg_base = dev->scsi_host_ptr->base; | ||
396 | dev->dbg_base_mapped = dev->base; | ||
397 | dev->dbg_size = dev->base_size; | ||
398 | |||
394 | aac_adapter_enable_int(dev); | 399 | aac_adapter_enable_int(dev); |
395 | 400 | ||
396 | /* | 401 | /* |
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
new file mode 100644
index 000000000000..c20494660603
--- /dev/null
+++ b/drivers/scsi/aacraid/src.c
@@ -0,0 +1,594 @@ | |||
1 | /* | ||
2 | * Adaptec AAC series RAID controller driver | ||
3 | * (c) Copyright 2001 Red Hat Inc. | ||
4 | * | ||
5 | * based on the old aacraid driver that is.. | ||
6 | * Adaptec aacraid device driver for Linux. | ||
7 | * | ||
8 | * Copyright (c) 2000-2010 Adaptec, Inc. | ||
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; see the file COPYING. If not, write to | ||
23 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Module Name: | ||
26 | * src.c | ||
27 | * | ||
28 | * Abstract: Hardware Device Interface for PMC SRC based controllers | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/version.h> | ||
41 | #include <linux/completion.h> | ||
42 | #include <linux/time.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <scsi/scsi_host.h> | ||
45 | |||
46 | #include "aacraid.h" | ||
47 | |||
48 | static irqreturn_t aac_src_intr_message(int irq, void *dev_id) | ||
49 | { | ||
50 | struct aac_dev *dev = dev_id; | ||
51 | unsigned long bellbits, bellbits_shifted; | ||
52 | int our_interrupt = 0; | ||
53 | int isFastResponse; | ||
54 | u32 index, handle; | ||
55 | |||
56 | bellbits = src_readl(dev, MUnit.ODR_R); | ||
57 | if (bellbits & PmDoorBellResponseSent) { | ||
58 | bellbits = PmDoorBellResponseSent; | ||
59 | /* handle async. status */ | ||
60 | our_interrupt = 1; | ||
61 | index = dev->host_rrq_idx; | ||
62 | if (dev->host_rrq[index] == 0) { | ||
63 | u32 old_index = index; | ||
64 | /* adjust index */ | ||
65 | do { | ||
66 | index++; | ||
67 | if (index == dev->scsi_host_ptr->can_queue + | ||
68 | AAC_NUM_MGT_FIB) | ||
69 | index = 0; | ||
70 | if (dev->host_rrq[index] != 0) | ||
71 | break; | ||
72 | } while (index != old_index); | ||
73 | dev->host_rrq_idx = index; | ||
74 | } | ||
75 | for (;;) { | ||
76 | isFastResponse = 0; | ||
77 | /* remove toggle bit (31) */ | ||
78 | handle = (dev->host_rrq[index] & 0x7fffffff); | ||
79 | /* check fast response bit (30) */ | ||
80 | if (handle & 0x40000000) | ||
81 | isFastResponse = 1; | ||
82 | handle &= 0x0000ffff; | ||
83 | if (handle == 0) | ||
84 | break; | ||
85 | |||
86 | aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); | ||
87 | |||
88 | dev->host_rrq[index++] = 0; | ||
89 | if (index == dev->scsi_host_ptr->can_queue + | ||
90 | AAC_NUM_MGT_FIB) | ||
91 | index = 0; | ||
92 | dev->host_rrq_idx = index; | ||
93 | } | ||
94 | } else { | ||
95 | bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); | ||
96 | if (bellbits_shifted & DoorBellAifPending) { | ||
97 | our_interrupt = 1; | ||
98 | /* handle AIF */ | ||
99 | aac_intr_normal(dev, 0, 2, 0, NULL); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | if (our_interrupt) { | ||
104 | src_writel(dev, MUnit.ODR_C, bellbits); | ||
105 | return IRQ_HANDLED; | ||
106 | } | ||
107 | return IRQ_NONE; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * aac_src_disable_interrupt - Disable interrupts | ||
112 | * @dev: Adapter | ||
113 | */ | ||
114 | |||
115 | static void aac_src_disable_interrupt(struct aac_dev *dev) | ||
116 | { | ||
117 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * aac_src_enable_interrupt_message - Enable interrupts | ||
122 | * @dev: Adapter | ||
123 | */ | ||
124 | |||
125 | static void aac_src_enable_interrupt_message(struct aac_dev *dev) | ||
126 | { | ||
127 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * src_sync_cmd - send a command and wait | ||
132 | * @dev: Adapter | ||
133 | * @command: Command to execute | ||
134 | * @p1: first parameter | ||
135 | * @ret: adapter status | ||
136 | * | ||
137 | * This routine will send a synchronous command to the adapter and wait | ||
138 | * for its completion. | ||
139 | */ | ||
140 | |||
141 | static int src_sync_cmd(struct aac_dev *dev, u32 command, | ||
142 | u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, | ||
143 | u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) | ||
144 | { | ||
145 | unsigned long start; | ||
146 | int ok; | ||
147 | |||
148 | /* | ||
149 | * Write the command into Mailbox 0 | ||
150 | */ | ||
151 | writel(command, &dev->IndexRegs->Mailbox[0]); | ||
152 | /* | ||
153 | * Write the parameters into Mailboxes 1 - 6 | ||
154 | */ | ||
155 | writel(p1, &dev->IndexRegs->Mailbox[1]); | ||
156 | writel(p2, &dev->IndexRegs->Mailbox[2]); | ||
157 | writel(p3, &dev->IndexRegs->Mailbox[3]); | ||
158 | writel(p4, &dev->IndexRegs->Mailbox[4]); | ||
159 | |||
160 | /* | ||
161 | * Clear the synch command doorbell to start on a clean slate. | ||
162 | */ | ||
163 | src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
164 | |||
165 | /* | ||
166 | * Disable doorbell interrupts | ||
167 | */ | ||
168 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); | ||
169 | |||
170 | /* | ||
171 | * Force the completion of the mask register write before issuing | ||
172 | * the interrupt. | ||
173 | */ | ||
174 | src_readl(dev, MUnit.OIMR); | ||
175 | |||
176 | /* | ||
177 | * Signal that there is a new synch command | ||
178 | */ | ||
179 | src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); | ||
180 | |||
181 | ok = 0; | ||
182 | start = jiffies; | ||
183 | |||
184 | /* | ||
185 | * Wait up to 30 seconds | ||
186 | */ | ||
187 | while (time_before(jiffies, start+30*HZ)) { | ||
188 | /* Delay 5 microseconds to let Mon960 get info. */ | ||
189 | udelay(5); | ||
190 | |||
191 | /* Mon960 will set doorbell0 bit | ||
192 | * when it has completed the command | ||
193 | */ | ||
194 | if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { | ||
195 | /* Clear the doorbell */ | ||
196 | src_writel(dev, | ||
197 | MUnit.ODR_C, | ||
198 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
199 | ok = 1; | ||
200 | break; | ||
201 | } | ||
202 | |||
203 | /* Yield the processor in case we are slow */ | ||
204 | msleep(1); | ||
205 | } | ||
206 | if (unlikely(ok != 1)) { | ||
207 | /* Restore interrupt mask even though we timed out */ | ||
208 | aac_adapter_enable_int(dev); | ||
209 | return -ETIMEDOUT; | ||
210 | } | ||
211 | |||
212 | /* Pull the synch status from Mailbox 0 */ | ||
213 | if (status) | ||
214 | *status = readl(&dev->IndexRegs->Mailbox[0]); | ||
215 | if (r1) | ||
216 | *r1 = readl(&dev->IndexRegs->Mailbox[1]); | ||
217 | if (r2) | ||
218 | *r2 = readl(&dev->IndexRegs->Mailbox[2]); | ||
219 | if (r3) | ||
220 | *r3 = readl(&dev->IndexRegs->Mailbox[3]); | ||
221 | if (r4) | ||
222 | *r4 = readl(&dev->IndexRegs->Mailbox[4]); | ||
223 | |||
224 | /* Clear the synch command doorbell */ | ||
225 | src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
226 | |||
227 | /* Restore interrupt mask */ | ||
228 | aac_adapter_enable_int(dev); | ||
229 | return 0; | ||
230 | |||
231 | } | ||
232 | |||
233 | /** | ||
234 | * aac_src_interrupt_adapter - interrupt adapter | ||
235 | * @dev: Adapter | ||
236 | * | ||
237 | * Send an interrupt to the i960 and breakpoint it. | ||
238 | */ | ||
239 | |||
240 | static void aac_src_interrupt_adapter(struct aac_dev *dev) | ||
241 | { | ||
242 | src_sync_cmd(dev, BREAKPOINT_REQUEST, | ||
243 | 0, 0, 0, 0, 0, 0, | ||
244 | NULL, NULL, NULL, NULL, NULL); | ||
245 | } | ||
246 | |||
247 | /** | ||
248 | * aac_src_notify_adapter - send an event to the adapter | ||
249 | * @dev: Adapter | ||
250 | * @event: Event to send | ||
251 | * | ||
252 | * Notify the i960 that something it probably cares about has | ||
253 | * happened. | ||
254 | */ | ||
255 | |||
256 | static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) | ||
257 | { | ||
258 | switch (event) { | ||
259 | |||
260 | case AdapNormCmdQue: | ||
261 | src_writel(dev, MUnit.ODR_C, | ||
262 | INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); | ||
263 | break; | ||
264 | case HostNormRespNotFull: | ||
265 | src_writel(dev, MUnit.ODR_C, | ||
266 | INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); | ||
267 | break; | ||
268 | case AdapNormRespQue: | ||
269 | src_writel(dev, MUnit.ODR_C, | ||
270 | INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); | ||
271 | break; | ||
272 | case HostNormCmdNotFull: | ||
273 | src_writel(dev, MUnit.ODR_C, | ||
274 | INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); | ||
275 | break; | ||
276 | case FastIo: | ||
277 | src_writel(dev, MUnit.ODR_C, | ||
278 | INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); | ||
279 | break; | ||
280 | case AdapPrintfDone: | ||
281 | src_writel(dev, MUnit.ODR_C, | ||
282 | INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); | ||
283 | break; | ||
284 | default: | ||
285 | BUG(); | ||
286 | break; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * aac_src_start_adapter - activate adapter | ||
292 | * @dev: Adapter | ||
293 | * | ||
294 | * Start up processing on an i960 based AAC adapter | ||
295 | */ | ||
296 | |||
297 | static void aac_src_start_adapter(struct aac_dev *dev) | ||
298 | { | ||
299 | struct aac_init *init; | ||
300 | |||
301 | init = dev->init; | ||
302 | init->HostElapsedSeconds = cpu_to_le32(get_seconds()); | ||
303 | |||
304 | /* We can only use a 32 bit address here */ | ||
305 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, | ||
306 | 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * aac_src_check_health | ||
311 | * @dev: device to check if healthy | ||
312 | * | ||
313 | * Will attempt to determine if the specified adapter is alive and | ||
314 | * capable of handling requests, returning 0 if alive. | ||
315 | */ | ||
316 | static int aac_src_check_health(struct aac_dev *dev) | ||
317 | { | ||
318 | u32 status = src_readl(dev, MUnit.OMR); | ||
319 | |||
320 | /* | ||
321 | * Check to see if the board failed any self tests. | ||
322 | */ | ||
323 | if (unlikely(status & SELF_TEST_FAILED)) | ||
324 | return -1; | ||
325 | |||
326 | /* | ||
327 | * Check to see if the board panic'd. | ||
328 | */ | ||
329 | if (unlikely(status & KERNEL_PANIC)) | ||
330 | return (status >> 16) & 0xFF; | ||
331 | /* | ||
332 | * Check that the adapter is up and running. | ||
333 | */ | ||
334 | if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) | ||
335 | return -3; | ||
336 | /* | ||
337 | * Everything is OK | ||
338 | */ | ||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * aac_src_deliver_message | ||
344 | * @fib: fib to issue | ||
345 | * | ||
346 | * Will send a fib, returning 0 if successful. | ||
347 | */ | ||
348 | static int aac_src_deliver_message(struct fib *fib) | ||
349 | { | ||
350 | struct aac_dev *dev = fib->dev; | ||
351 | struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; | ||
352 | unsigned long qflags; | ||
353 | u32 fibsize; | ||
354 | u64 address; | ||
355 | struct aac_fib_xporthdr *pFibX; | ||
356 | |||
357 | spin_lock_irqsave(q->lock, qflags); | ||
358 | q->numpending++; | ||
359 | spin_unlock_irqrestore(q->lock, qflags); | ||
360 | |||
361 | /* Calculate the fibsize: total size in 128-byte units, less one */ | ||
362 | fibsize = (sizeof(struct aac_fib_xporthdr) + | ||
363 | fib->hw_fib_va->header.Size + 127) / 128 - 1; | ||
364 | if (fibsize > (ALIGN32 - 1)) | ||
365 | fibsize = ALIGN32 - 1; | ||
366 | |||
367 | /* Fill XPORT header */ | ||
368 | pFibX = (struct aac_fib_xporthdr *) | ||
369 | ((unsigned char *)fib->hw_fib_va - | ||
370 | sizeof(struct aac_fib_xporthdr)); | ||
371 | pFibX->Handle = fib->hw_fib_va->header.SenderData + 1; | ||
372 | pFibX->HostAddress = fib->hw_fib_pa; | ||
373 | pFibX->Size = fib->hw_fib_va->header.Size; | ||
374 | address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); | ||
375 | |||
376 | src_writel(dev, MUnit.IQ_H, (u32)(address >> 32)); | ||
377 | src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * aac_src_ioremap | ||
383 | * @size: mapping resize request | ||
384 | * | ||
385 | */ | ||
386 | static int aac_src_ioremap(struct aac_dev *dev, u32 size) | ||
387 | { | ||
388 | if (!size) { | ||
389 | iounmap(dev->regs.src.bar0); | ||
390 | dev->regs.src.bar0 = NULL; | ||
391 | iounmap(dev->base); | ||
392 | dev->base = NULL; | ||
393 | return 0; | ||
394 | } | ||
395 | dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), | ||
396 | AAC_MIN_SRC_BAR1_SIZE); | ||
397 | dev->base = NULL; | ||
398 | if (dev->regs.src.bar1 == NULL) | ||
399 | return -1; | ||
400 | dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, | ||
401 | size); | ||
402 | if (dev->base == NULL) { | ||
403 | iounmap(dev->regs.src.bar1); | ||
404 | dev->regs.src.bar1 = NULL; | ||
405 | return -1; | ||
406 | } | ||
407 | dev->IndexRegs = &((struct src_registers __iomem *) | ||
408 | dev->base)->IndexRegs; | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int aac_src_restart_adapter(struct aac_dev *dev, int bled) | ||
413 | { | ||
414 | u32 var, reset_mask; | ||
415 | |||
416 | if (bled >= 0) { | ||
417 | if (bled) | ||
418 | printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", | ||
419 | dev->name, dev->id, bled); | ||
420 | bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, | ||
421 | 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); | ||
422 | if (bled || (var != 0x00000001)) | ||
423 | bled = -EINVAL; | ||
424 | if (dev->supplement_adapter_info.SupportedOptions2 & | ||
425 | AAC_OPTION_DOORBELL_RESET) { | ||
426 | src_writel(dev, MUnit.IDR, reset_mask); | ||
427 | msleep(5000); /* Delay 5 seconds */ | ||
428 | } | ||
429 | } | ||
430 | |||
431 | if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) | ||
432 | return -ENODEV; | ||
433 | |||
434 | if (startup_timeout < 300) | ||
435 | startup_timeout = 300; | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * aac_src_select_comm - Select communications method | ||
442 | * @dev: Adapter | ||
443 | * @comm: communications method | ||
444 | */ | ||
445 | int aac_src_select_comm(struct aac_dev *dev, int comm) | ||
446 | { | ||
447 | switch (comm) { | ||
448 | case AAC_COMM_MESSAGE: | ||
449 | dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; | ||
450 | dev->a_ops.adapter_intr = aac_src_intr_message; | ||
451 | dev->a_ops.adapter_deliver = aac_src_deliver_message; | ||
452 | break; | ||
453 | default: | ||
454 | return 1; | ||
455 | } | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * aac_src_init - initialize a Cardinal Frey Bar card | ||
461 | * @dev: device to configure | ||
462 | * | ||
463 | */ | ||
464 | |||
465 | int aac_src_init(struct aac_dev *dev) | ||
466 | { | ||
467 | unsigned long start; | ||
468 | unsigned long status; | ||
469 | int restart = 0; | ||
470 | int instance = dev->id; | ||
471 | const char *name = dev->name; | ||
472 | |||
473 | dev->a_ops.adapter_ioremap = aac_src_ioremap; | ||
474 | dev->a_ops.adapter_comm = aac_src_select_comm; | ||
475 | |||
476 | dev->base_size = AAC_MIN_SRC_BAR0_SIZE; | ||
477 | if (aac_adapter_ioremap(dev, dev->base_size)) { | ||
478 | printk(KERN_WARNING "%s: unable to map adapter.\n", name); | ||
479 | goto error_iounmap; | ||
480 | } | ||
481 | |||
482 | /* Failure to reset here is an option ... */ | ||
483 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; | ||
484 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; | ||
485 | if ((aac_reset_devices || reset_devices) && | ||
486 | !aac_src_restart_adapter(dev, 0)) | ||
487 | ++restart; | ||
488 | /* | ||
489 | * Check to see if the board panic'd while booting. | ||
490 | */ | ||
491 | status = src_readl(dev, MUnit.OMR); | ||
492 | if (status & KERNEL_PANIC) { | ||
493 | if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) | ||
494 | goto error_iounmap; | ||
495 | ++restart; | ||
496 | } | ||
497 | /* | ||
498 | * Check to see if the board failed any self tests. | ||
499 | */ | ||
500 | status = src_readl(dev, MUnit.OMR); | ||
501 | if (status & SELF_TEST_FAILED) { | ||
502 | printk(KERN_ERR "%s%d: adapter self-test failed.\n", | ||
503 | dev->name, instance); | ||
504 | goto error_iounmap; | ||
505 | } | ||
506 | /* | ||
507 | * Check to see if the monitor panic'd while booting. | ||
508 | */ | ||
509 | if (status & MONITOR_PANIC) { | ||
510 | printk(KERN_ERR "%s%d: adapter monitor panic.\n", | ||
511 | dev->name, instance); | ||
512 | goto error_iounmap; | ||
513 | } | ||
514 | start = jiffies; | ||
515 | /* | ||
516 | * Wait for the adapter to be up and running. Wait up to 3 minutes | ||
517 | */ | ||
518 | while (!((status = src_readl(dev, MUnit.OMR)) & | ||
519 | KERNEL_UP_AND_RUNNING)) { | ||
520 | if ((restart && | ||
521 | (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || | ||
522 | time_after(jiffies, start+HZ*startup_timeout)) { | ||
523 | printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", | ||
524 | dev->name, instance, status); | ||
525 | goto error_iounmap; | ||
526 | } | ||
527 | if (!restart && | ||
528 | ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || | ||
529 | time_after(jiffies, start + HZ * | ||
530 | ((startup_timeout > 60) | ||
531 | ? (startup_timeout - 60) | ||
532 | : (startup_timeout / 2))))) { | ||
533 | if (likely(!aac_src_restart_adapter(dev, | ||
534 | aac_src_check_health(dev)))) | ||
535 | start = jiffies; | ||
536 | ++restart; | ||
537 | } | ||
538 | msleep(1); | ||
539 | } | ||
540 | if (restart && aac_commit) | ||
541 | aac_commit = 1; | ||
542 | /* | ||
543 | * Fill in the common function dispatch table. | ||
544 | */ | ||
545 | dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; | ||
546 | dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; | ||
547 | dev->a_ops.adapter_notify = aac_src_notify_adapter; | ||
548 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; | ||
549 | dev->a_ops.adapter_check_health = aac_src_check_health; | ||
550 | dev->a_ops.adapter_restart = aac_src_restart_adapter; | ||
551 | |||
552 | /* | ||
553 | * First clear out all interrupts. Then enable the ones that we | ||
554 | * can handle. | ||
555 | */ | ||
556 | aac_adapter_comm(dev, AAC_COMM_MESSAGE); | ||
557 | aac_adapter_disable_int(dev); | ||
558 | src_writel(dev, MUnit.ODR_C, 0xffffffff); | ||
559 | aac_adapter_enable_int(dev); | ||
560 | |||
561 | if (aac_init_adapter(dev) == NULL) | ||
562 | goto error_iounmap; | ||
563 | if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) | ||
564 | goto error_iounmap; | ||
565 | |||
566 | dev->msi = aac_msi && !pci_enable_msi(dev->pdev); | ||
567 | |||
568 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, | ||
569 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { | ||
570 | |||
571 | if (dev->msi) | ||
572 | pci_disable_msi(dev->pdev); | ||
573 | |||
574 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", | ||
575 | name, instance); | ||
576 | goto error_iounmap; | ||
577 | } | ||
578 | dev->dbg_base = pci_resource_start(dev->pdev, 2); | ||
579 | dev->dbg_base_mapped = dev->regs.src.bar1; | ||
580 | dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; | ||
581 | |||
582 | aac_adapter_enable_int(dev); | ||
583 | /* | ||
584 | * Tell the adapter that all is configured, and it can | ||
585 | * start accepting requests | ||
586 | */ | ||
587 | aac_src_start_adapter(dev); | ||
588 | |||
589 | return 0; | ||
590 | |||
591 | error_iounmap: | ||
592 | |||
593 | return -1; | ||
594 | } | ||
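The response-queue handling at the top of the src.c listing above (aac_src_intr_message(), installed as adapter_intr further down) packs three fields into each 32-bit host RRQ entry: bit 31 is a toggle bit, bit 30 flags a fast response, and the low 16 bits carry a 1-based FIB handle where 0 means the slot is empty. A minimal standalone C sketch of that decoding (illustrative only, with made-up names, not part of the driver):

#include <stdint.h>
#include <stdio.h>

struct rrq_entry {
	int valid;            /* a non-zero handle was present */
	int fast_response;    /* bit 30 was set */
	uint16_t fib_index;   /* 0-based index derived from the 1-based handle */
};

static struct rrq_entry decode_rrq(uint32_t raw)
{
	struct rrq_entry e = { 0, 0, 0 };
	uint32_t handle = raw & 0x7fffffff;           /* remove toggle bit (31) */

	e.fast_response = (handle & 0x40000000) != 0; /* fast response bit (30) */
	handle &= 0x0000ffff;                         /* 1-based handle, 0 == empty */
	if (handle != 0) {
		e.valid = 1;
		e.fib_index = (uint16_t)(handle - 1); /* the driver passes handle-1 on */
	}
	return e;
}

int main(void)
{
	struct rrq_entry e = decode_rrq(0xc0000005);  /* toggled, fast response, handle 5 */
	printf("valid=%d fast=%d index=%u\n", e.valid, e.fast_response, e.fib_index);
	return 0;
}

The driver walks the ring the same way, zeroing each consumed entry and wrapping the index once it reaches can_queue + AAC_NUM_MGT_FIB.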
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index df2fc09ba479..b6d350ac4288 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #include "bnx2fc_constants.h" | 62 | #include "bnx2fc_constants.h" |
63 | 63 | ||
64 | #define BNX2FC_NAME "bnx2fc" | 64 | #define BNX2FC_NAME "bnx2fc" |
65 | #define BNX2FC_VERSION "1.0.0" | 65 | #define BNX2FC_VERSION "1.0.1" |
66 | 66 | ||
67 | #define PFX "bnx2fc: " | 67 | #define PFX "bnx2fc: " |
68 | 68 | ||
@@ -84,9 +84,15 @@ | |||
84 | #define BNX2FC_NUM_MAX_SESS 128 | 84 | #define BNX2FC_NUM_MAX_SESS 128 |
85 | #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) | 85 | #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) |
86 | 86 | ||
87 | #define BNX2FC_MAX_OUTSTANDING_CMNDS 4096 | 87 | #define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 |
88 | #define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS | ||
89 | #define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE | ||
88 | #define BNX2FC_MIN_PAYLOAD 256 | 90 | #define BNX2FC_MIN_PAYLOAD 256 |
89 | #define BNX2FC_MAX_PAYLOAD 2048 | 91 | #define BNX2FC_MAX_PAYLOAD 2048 |
92 | #define BNX2FC_MFS \ | ||
93 | (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header)) | ||
94 | #define BNX2FC_MINI_JUMBO_MTU 2500 | ||
95 | |||
90 | 96 | ||
91 | #define BNX2FC_RQ_BUF_SZ 256 | 97 | #define BNX2FC_RQ_BUF_SZ 256 |
92 | #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) | 98 | #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) |
@@ -98,7 +104,8 @@ | |||
98 | #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) | 104 | #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) |
99 | #define BNX2FC_5771X_DB_PAGE_SIZE 128 | 105 | #define BNX2FC_5771X_DB_PAGE_SIZE 128 |
100 | 106 | ||
101 | #define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS | 107 | #define BNX2FC_MAX_TASKS \ |
108 | (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS) | ||
102 | #define BNX2FC_TASK_SIZE 128 | 109 | #define BNX2FC_TASK_SIZE 128 |
103 | #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) | 110 | #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) |
104 | #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) | 111 | #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) |
@@ -112,10 +119,10 @@ | |||
112 | #define BNX2FC_WRITE (1 << 0) | 119 | #define BNX2FC_WRITE (1 << 0) |
113 | 120 | ||
114 | #define BNX2FC_MIN_XID 0 | 121 | #define BNX2FC_MIN_XID 0 |
115 | #define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1) | 122 | #define BNX2FC_MAX_XID \ |
116 | #define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS) | 123 | (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1) |
117 | #define FCOE_MAX_XID \ | 124 | #define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) |
118 | (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256)) | 125 | #define FCOE_MAX_XID (FCOE_MIN_XID + 4095) |
119 | #define BNX2FC_MAX_LUN 0xFFFF | 126 | #define BNX2FC_MAX_LUN 0xFFFF |
120 | #define BNX2FC_MAX_FCP_TGT 256 | 127 | #define BNX2FC_MAX_FCP_TGT 256 |
121 | #define BNX2FC_MAX_CMD_LEN 16 | 128 | #define BNX2FC_MAX_CMD_LEN 16 |
@@ -125,7 +132,6 @@ | |||
125 | 132 | ||
126 | #define BNX2FC_WAIT_CNT 120 | 133 | #define BNX2FC_WAIT_CNT 120 |
127 | #define BNX2FC_FW_TIMEOUT (3 * HZ) | 134 | #define BNX2FC_FW_TIMEOUT (3 * HZ) |
128 | |||
129 | #define PORT_MAX 2 | 135 | #define PORT_MAX 2 |
130 | 136 | ||
131 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) | 137 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) |
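The reworked constants above carve the XID space into a SCSI command range (BNX2FC_MIN_XID..BNX2FC_MAX_XID, including the ELS/TM reserve) followed by a fixed FCoE range. A small sketch, not driver code, that simply evaluates the same arithmetic with the values shown:

#include <stdio.h>

int main(void)
{
	const int max_outstanding_cmds = 2048;                 /* BNX2FC_MAX_OUTSTANDING_CMNDS */
	const int elstm_xids = max_outstanding_cmds;           /* BNX2FC_ELSTM_XIDS */
	const int bnx2fc_min_xid = 0;
	const int bnx2fc_max_xid = max_outstanding_cmds + elstm_xids - 1;
	const int fcoe_min_xid = bnx2fc_max_xid + 1;
	const int fcoe_max_xid = fcoe_min_xid + 4095;
	const int max_tasks = max_outstanding_cmds + elstm_xids; /* BNX2FC_MAX_TASKS */

	printf("bnx2fc XIDs:   %d..%d\n", bnx2fc_min_xid, bnx2fc_max_xid); /* 0..4095 */
	printf("fcoe XIDs:     %d..%d\n", fcoe_min_xid, fcoe_max_xid);     /* 4096..8191 */
	printf("task contexts: %d\n", max_tasks);                          /* 4096 */
	return 0;
}

That is 4096 task contexts and a fixed FCoE XID window of 4096..8191, replacing the earlier upper bound that depended on nr_cpu_ids.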
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index e476e8753079..e2e647509a73 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |||
21 | 21 | ||
22 | #define DRV_MODULE_NAME "bnx2fc" | 22 | #define DRV_MODULE_NAME "bnx2fc" |
23 | #define DRV_MODULE_VERSION BNX2FC_VERSION | 23 | #define DRV_MODULE_VERSION BNX2FC_VERSION |
24 | #define DRV_MODULE_RELDATE "Jan 25, 2011" | 24 | #define DRV_MODULE_RELDATE "Mar 17, 2011" |
25 | 25 | ||
26 | 26 | ||
27 | static char version[] __devinitdata = | 27 | static char version[] __devinitdata = |
@@ -437,17 +437,16 @@ static int bnx2fc_l2_rcv_thread(void *arg) | |||
437 | set_current_state(TASK_INTERRUPTIBLE); | 437 | set_current_state(TASK_INTERRUPTIBLE); |
438 | while (!kthread_should_stop()) { | 438 | while (!kthread_should_stop()) { |
439 | schedule(); | 439 | schedule(); |
440 | set_current_state(TASK_RUNNING); | ||
441 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 440 | spin_lock_bh(&bg->fcoe_rx_list.lock); |
442 | while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { | 441 | while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { |
443 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 442 | spin_unlock_bh(&bg->fcoe_rx_list.lock); |
444 | bnx2fc_recv_frame(skb); | 443 | bnx2fc_recv_frame(skb); |
445 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 444 | spin_lock_bh(&bg->fcoe_rx_list.lock); |
446 | } | 445 | } |
446 | __set_current_state(TASK_INTERRUPTIBLE); | ||
447 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 447 | spin_unlock_bh(&bg->fcoe_rx_list.lock); |
448 | set_current_state(TASK_INTERRUPTIBLE); | ||
449 | } | 448 | } |
450 | set_current_state(TASK_RUNNING); | 449 | __set_current_state(TASK_RUNNING); |
451 | return 0; | 450 | return 0; |
452 | } | 451 | } |
453 | 452 | ||
@@ -569,7 +568,6 @@ int bnx2fc_percpu_io_thread(void *arg) | |||
569 | set_current_state(TASK_INTERRUPTIBLE); | 568 | set_current_state(TASK_INTERRUPTIBLE); |
570 | while (!kthread_should_stop()) { | 569 | while (!kthread_should_stop()) { |
571 | schedule(); | 570 | schedule(); |
572 | set_current_state(TASK_RUNNING); | ||
573 | spin_lock_bh(&p->fp_work_lock); | 571 | spin_lock_bh(&p->fp_work_lock); |
574 | while (!list_empty(&p->work_list)) { | 572 | while (!list_empty(&p->work_list)) { |
575 | list_splice_init(&p->work_list, &work_list); | 573 | list_splice_init(&p->work_list, &work_list); |
@@ -583,10 +581,10 @@ int bnx2fc_percpu_io_thread(void *arg) | |||
583 | 581 | ||
584 | spin_lock_bh(&p->fp_work_lock); | 582 | spin_lock_bh(&p->fp_work_lock); |
585 | } | 583 | } |
584 | __set_current_state(TASK_INTERRUPTIBLE); | ||
586 | spin_unlock_bh(&p->fp_work_lock); | 585 | spin_unlock_bh(&p->fp_work_lock); |
587 | set_current_state(TASK_INTERRUPTIBLE); | ||
588 | } | 586 | } |
589 | set_current_state(TASK_RUNNING); | 587 | __set_current_state(TASK_RUNNING); |
590 | 588 | ||
591 | return 0; | 589 | return 0; |
592 | } | 590 | } |
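Both thread loops above (bnx2fc_l2_rcv_thread() and bnx2fc_percpu_io_thread()) now mark the task TASK_INTERRUPTIBLE before dropping the list lock, so a producer that queues work right after the final empty check finds the thread already flagged as about to sleep and its wake-up is not lost. In userspace the same guarantee comes from pthread_cond_wait(), which releases the mutex and blocks atomically; a rough analogue of the consumer pattern, not the driver code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending;   /* queued work items */
static int done;      /* producer finished */

static void *consumer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (pending) {                /* drain the queue */
			pending--;
			pthread_mutex_unlock(&lock);
			/* ... process one item outside the lock ... */
			pthread_mutex_lock(&lock);
		}
		if (done)
			break;
		/* atomically drop the lock and sleep: no wakeup can be missed */
		pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;
	pthread_create(&thr, NULL, consumer, NULL);
	for (int i = 0; i < 5; i++) {
		pthread_mutex_lock(&lock);
		pending++;                       /* queue work */
		pthread_cond_signal(&cond);      /* wake the consumer */
		pthread_mutex_unlock(&lock);
	}
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(thr, NULL);
	printf("all work consumed, pending=%d\n", pending);
	return 0;
}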
@@ -661,31 +659,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) | |||
661 | return 0; | 659 | return 0; |
662 | } | 660 | } |
663 | 661 | ||
664 | static int bnx2fc_mfs_update(struct fc_lport *lport) | ||
665 | { | ||
666 | struct fcoe_port *port = lport_priv(lport); | ||
667 | struct bnx2fc_hba *hba = port->priv; | ||
668 | struct net_device *netdev = hba->netdev; | ||
669 | u32 mfs; | ||
670 | u32 max_mfs; | ||
671 | |||
672 | mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + | ||
673 | sizeof(struct fcoe_crc_eof)); | ||
674 | max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); | ||
675 | BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); | ||
676 | if (mfs > max_mfs) | ||
677 | mfs = max_mfs; | ||
678 | |||
679 | /* Adjust mfs to be a multiple of 256 bytes */ | ||
680 | mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * | ||
681 | BNX2FC_MIN_PAYLOAD); | ||
682 | mfs = mfs + sizeof(struct fc_frame_header); | ||
683 | |||
684 | BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); | ||
685 | if (fc_set_mfs(lport, mfs)) | ||
686 | return -EINVAL; | ||
687 | return 0; | ||
688 | } | ||
689 | static void bnx2fc_link_speed_update(struct fc_lport *lport) | 662 | static void bnx2fc_link_speed_update(struct fc_lport *lport) |
690 | { | 663 | { |
691 | struct fcoe_port *port = lport_priv(lport); | 664 | struct fcoe_port *port = lport_priv(lport); |
@@ -754,7 +727,7 @@ static int bnx2fc_net_config(struct fc_lport *lport) | |||
754 | !hba->phys_dev->ethtool_ops->get_pauseparam) | 727 | !hba->phys_dev->ethtool_ops->get_pauseparam) |
755 | return -EOPNOTSUPP; | 728 | return -EOPNOTSUPP; |
756 | 729 | ||
757 | if (bnx2fc_mfs_update(lport)) | 730 | if (fc_set_mfs(lport, BNX2FC_MFS)) |
758 | return -EINVAL; | 731 | return -EINVAL; |
759 | 732 | ||
760 | skb_queue_head_init(&port->fcoe_pending_queue); | 733 | skb_queue_head_init(&port->fcoe_pending_queue); |
@@ -825,14 +798,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event) | |||
825 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) | 798 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) |
826 | printk(KERN_ERR "indicate_netevent: "\ | 799 | printk(KERN_ERR "indicate_netevent: "\ |
827 | "adapter is not UP!!\n"); | 800 | "adapter is not UP!!\n"); |
828 | /* fall thru to update mfs if MTU has changed */ | ||
829 | case NETDEV_CHANGEMTU: | ||
830 | BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); | ||
831 | bnx2fc_mfs_update(lport); | ||
832 | mutex_lock(&lport->lp_mutex); | ||
833 | list_for_each_entry(vport, &lport->vports, list) | ||
834 | bnx2fc_mfs_update(vport); | ||
835 | mutex_unlock(&lport->lp_mutex); | ||
836 | break; | 801 | break; |
837 | 802 | ||
838 | case NETDEV_DOWN: | 803 | case NETDEV_DOWN: |
@@ -1095,13 +1060,6 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) | |||
1095 | struct netdev_hw_addr *ha; | 1060 | struct netdev_hw_addr *ha; |
1096 | int sel_san_mac = 0; | 1061 | int sel_san_mac = 0; |
1097 | 1062 | ||
1098 | /* Do not support for bonding device */ | ||
1099 | if ((netdev->priv_flags & IFF_MASTER_ALB) || | ||
1100 | (netdev->priv_flags & IFF_SLAVE_INACTIVE) || | ||
1101 | (netdev->priv_flags & IFF_MASTER_8023AD)) { | ||
1102 | return -EOPNOTSUPP; | ||
1103 | } | ||
1104 | |||
1105 | /* setup Source MAC Address */ | 1063 | /* setup Source MAC Address */ |
1106 | rcu_read_lock(); | 1064 | rcu_read_lock(); |
1107 | for_each_dev_addr(physdev, ha) { | 1065 | for_each_dev_addr(physdev, ha) { |
@@ -1432,16 +1390,9 @@ static int bnx2fc_destroy(struct net_device *netdev) | |||
1432 | struct net_device *phys_dev; | 1390 | struct net_device *phys_dev; |
1433 | int rc = 0; | 1391 | int rc = 0; |
1434 | 1392 | ||
1435 | if (!rtnl_trylock()) | 1393 | rtnl_lock(); |
1436 | return restart_syscall(); | ||
1437 | 1394 | ||
1438 | mutex_lock(&bnx2fc_dev_lock); | 1395 | mutex_lock(&bnx2fc_dev_lock); |
1439 | #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE | ||
1440 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1441 | rc = -ENODEV; | ||
1442 | goto netdev_err; | ||
1443 | } | ||
1444 | #endif | ||
1445 | /* obtain physical netdev */ | 1396 | /* obtain physical netdev */ |
1446 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1397 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1447 | phys_dev = vlan_dev_real_dev(netdev); | 1398 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1805,18 +1756,10 @@ static int bnx2fc_disable(struct net_device *netdev) | |||
1805 | struct ethtool_drvinfo drvinfo; | 1756 | struct ethtool_drvinfo drvinfo; |
1806 | int rc = 0; | 1757 | int rc = 0; |
1807 | 1758 | ||
1808 | if (!rtnl_trylock()) { | 1759 | rtnl_lock(); |
1809 | printk(KERN_ERR PFX "retrying for rtnl_lock\n"); | ||
1810 | return -EIO; | ||
1811 | } | ||
1812 | 1760 | ||
1813 | mutex_lock(&bnx2fc_dev_lock); | 1761 | mutex_lock(&bnx2fc_dev_lock); |
1814 | 1762 | ||
1815 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1816 | rc = -ENODEV; | ||
1817 | goto nodev; | ||
1818 | } | ||
1819 | |||
1820 | /* obtain physical netdev */ | 1763 | /* obtain physical netdev */ |
1821 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1764 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1822 | phys_dev = vlan_dev_real_dev(netdev); | 1765 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1867,19 +1810,11 @@ static int bnx2fc_enable(struct net_device *netdev) | |||
1867 | struct ethtool_drvinfo drvinfo; | 1810 | struct ethtool_drvinfo drvinfo; |
1868 | int rc = 0; | 1811 | int rc = 0; |
1869 | 1812 | ||
1870 | if (!rtnl_trylock()) { | 1813 | rtnl_lock(); |
1871 | printk(KERN_ERR PFX "retrying for rtnl_lock\n"); | ||
1872 | return -EIO; | ||
1873 | } | ||
1874 | 1814 | ||
1875 | BNX2FC_MISC_DBG("Entered %s\n", __func__); | 1815 | BNX2FC_MISC_DBG("Entered %s\n", __func__); |
1876 | mutex_lock(&bnx2fc_dev_lock); | 1816 | mutex_lock(&bnx2fc_dev_lock); |
1877 | 1817 | ||
1878 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1879 | rc = -ENODEV; | ||
1880 | goto nodev; | ||
1881 | } | ||
1882 | |||
1883 | /* obtain physical netdev */ | 1818 | /* obtain physical netdev */ |
1884 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1819 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1885 | phys_dev = vlan_dev_real_dev(netdev); | 1820 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1942,18 +1877,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | |||
1942 | return -EIO; | 1877 | return -EIO; |
1943 | } | 1878 | } |
1944 | 1879 | ||
1945 | if (!rtnl_trylock()) { | 1880 | rtnl_lock(); |
1946 | printk(KERN_ERR "trying for rtnl_lock\n"); | ||
1947 | return -EIO; | ||
1948 | } | ||
1949 | mutex_lock(&bnx2fc_dev_lock); | ||
1950 | 1881 | ||
1951 | #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE | 1882 | mutex_lock(&bnx2fc_dev_lock); |
1952 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1953 | rc = -ENODEV; | ||
1954 | goto mod_err; | ||
1955 | } | ||
1956 | #endif | ||
1957 | 1883 | ||
1958 | if (!try_module_get(THIS_MODULE)) { | 1884 | if (!try_module_get(THIS_MODULE)) { |
1959 | rc = -EINVAL; | 1885 | rc = -EINVAL; |
@@ -2506,7 +2432,7 @@ static struct scsi_host_template bnx2fc_shost_template = { | |||
2506 | .change_queue_type = fc_change_queue_type, | 2432 | .change_queue_type = fc_change_queue_type, |
2507 | .this_id = -1, | 2433 | .this_id = -1, |
2508 | .cmd_per_lun = 3, | 2434 | .cmd_per_lun = 3, |
2509 | .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), | 2435 | .can_queue = BNX2FC_CAN_QUEUE, |
2510 | .use_clustering = ENABLE_CLUSTERING, | 2436 | .use_clustering = ENABLE_CLUSTERING, |
2511 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, | 2437 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, |
2512 | .max_sectors = 512, | 2438 | .max_sectors = 512, |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 4f4096836742..1b680e288c56 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
@@ -87,7 +87,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |||
87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; | 87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; |
88 | fcoe_init1.task_list_pbl_addr_hi = | 88 | fcoe_init1.task_list_pbl_addr_hi = |
89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); | 89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); |
90 | fcoe_init1.mtu = hba->netdev->mtu; | 90 | fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; |
91 | 91 | ||
92 | fcoe_init1.flags = (PAGE_SHIFT << | 92 | fcoe_init1.flags = (PAGE_SHIFT << |
93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); | 93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); |
@@ -590,7 +590,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
590 | 590 | ||
591 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; | 591 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; |
592 | 592 | ||
593 | spin_lock_bh(&tgt->tgt_lock); | ||
593 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); | 594 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); |
595 | spin_unlock_bh(&tgt->tgt_lock); | ||
596 | |||
594 | if (rq_data) { | 597 | if (rq_data) { |
595 | buf = rq_data; | 598 | buf = rq_data; |
596 | } else { | 599 | } else { |
@@ -603,8 +606,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
603 | } | 606 | } |
604 | 607 | ||
605 | for (i = 0; i < num_rq; i++) { | 608 | for (i = 0; i < num_rq; i++) { |
609 | spin_lock_bh(&tgt->tgt_lock); | ||
606 | rq_data = (unsigned char *) | 610 | rq_data = (unsigned char *) |
607 | bnx2fc_get_next_rqe(tgt, 1); | 611 | bnx2fc_get_next_rqe(tgt, 1); |
612 | spin_unlock_bh(&tgt->tgt_lock); | ||
608 | len = BNX2FC_RQ_BUF_SZ; | 613 | len = BNX2FC_RQ_BUF_SZ; |
609 | memcpy(buf1, rq_data, len); | 614 | memcpy(buf1, rq_data, len); |
610 | buf1 += len; | 615 | buf1 += len; |
@@ -615,13 +620,15 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
615 | 620 | ||
616 | if (buf != rq_data) | 621 | if (buf != rq_data) |
617 | kfree(buf); | 622 | kfree(buf); |
623 | spin_lock_bh(&tgt->tgt_lock); | ||
618 | bnx2fc_return_rqe(tgt, num_rq); | 624 | bnx2fc_return_rqe(tgt, num_rq); |
625 | spin_unlock_bh(&tgt->tgt_lock); | ||
619 | break; | 626 | break; |
620 | 627 | ||
621 | case FCOE_ERROR_DETECTION_CQE_TYPE: | 628 | case FCOE_ERROR_DETECTION_CQE_TYPE: |
622 | /* | 629 | /* |
623 | *In case of error reporting CQE a single RQ entry | 630 | * In case of error reporting CQE a single RQ entry |
624 | * is consumes. | 631 | * is consumed. |
625 | */ | 632 | */ |
626 | spin_lock_bh(&tgt->tgt_lock); | 633 | spin_lock_bh(&tgt->tgt_lock); |
627 | num_rq = 1; | 634 | num_rq = 1; |
@@ -705,6 +712,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
705 | *In case of warning reporting CQE a single RQ entry | 712 | *In case of warning reporting CQE a single RQ entry |
706 | * is consumes. | 713 | * is consumes. |
707 | */ | 714 | */ |
715 | spin_lock_bh(&tgt->tgt_lock); | ||
708 | num_rq = 1; | 716 | num_rq = 1; |
709 | err_entry = (struct fcoe_err_report_entry *) | 717 | err_entry = (struct fcoe_err_report_entry *) |
710 | bnx2fc_get_next_rqe(tgt, 1); | 718 | bnx2fc_get_next_rqe(tgt, 1); |
@@ -717,6 +725,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
717 | err_entry->tx_buf_off, err_entry->rx_buf_off); | 725 | err_entry->tx_buf_off, err_entry->rx_buf_off); |
718 | 726 | ||
719 | bnx2fc_return_rqe(tgt, 1); | 727 | bnx2fc_return_rqe(tgt, 1); |
728 | spin_unlock_bh(&tgt->tgt_lock); | ||
720 | break; | 729 | break; |
721 | 730 | ||
722 | default: | 731 | default: |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 0f1dd23730db..d3fc302c241a 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -11,6 +11,9 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include "bnx2fc.h" | 13 | #include "bnx2fc.h" |
14 | |||
15 | #define RESERVE_FREE_LIST_INDEX num_possible_cpus() | ||
16 | |||
14 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, | 17 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, |
15 | int bd_index); | 18 | int bd_index); |
16 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); | 19 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); |
@@ -242,8 +245,9 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
242 | u32 mem_size; | 245 | u32 mem_size; |
243 | u16 xid; | 246 | u16 xid; |
244 | int i; | 247 | int i; |
245 | int num_ios; | 248 | int num_ios, num_pri_ios; |
246 | size_t bd_tbl_sz; | 249 | size_t bd_tbl_sz; |
250 | int arr_sz = num_possible_cpus() + 1; | ||
247 | 251 | ||
248 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { | 252 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { |
249 | printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ | 253 | printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ |
@@ -263,14 +267,14 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
263 | } | 267 | } |
264 | 268 | ||
265 | cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * | 269 | cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * |
266 | num_possible_cpus(), GFP_KERNEL); | 270 | arr_sz, GFP_KERNEL); |
267 | if (!cmgr->free_list) { | 271 | if (!cmgr->free_list) { |
268 | printk(KERN_ERR PFX "failed to alloc free_list\n"); | 272 | printk(KERN_ERR PFX "failed to alloc free_list\n"); |
269 | goto mem_err; | 273 | goto mem_err; |
270 | } | 274 | } |
271 | 275 | ||
272 | cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * | 276 | cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * |
273 | num_possible_cpus(), GFP_KERNEL); | 277 | arr_sz, GFP_KERNEL); |
274 | if (!cmgr->free_list_lock) { | 278 | if (!cmgr->free_list_lock) { |
275 | printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); | 279 | printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); |
276 | goto mem_err; | 280 | goto mem_err; |
@@ -279,13 +283,18 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
279 | cmgr->hba = hba; | 283 | cmgr->hba = hba; |
280 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); | 284 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); |
281 | 285 | ||
282 | for (i = 0; i < num_possible_cpus(); i++) { | 286 | for (i = 0; i < arr_sz; i++) { |
283 | INIT_LIST_HEAD(&cmgr->free_list[i]); | 287 | INIT_LIST_HEAD(&cmgr->free_list[i]); |
284 | spin_lock_init(&cmgr->free_list_lock[i]); | 288 | spin_lock_init(&cmgr->free_list_lock[i]); |
285 | } | 289 | } |
286 | 290 | ||
287 | /* Pre-allocated pool of bnx2fc_cmds */ | 291 | /* |
292 | * Pre-allocated pool of bnx2fc_cmds. | ||
293 | * Last entry in the free list array is the free list | ||
294 | * of slow path requests. | ||
295 | */ | ||
288 | xid = BNX2FC_MIN_XID; | 296 | xid = BNX2FC_MIN_XID; |
297 | num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; | ||
289 | for (i = 0; i < num_ios; i++) { | 298 | for (i = 0; i < num_ios; i++) { |
290 | io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); | 299 | io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); |
291 | 300 | ||
@@ -298,11 +307,13 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
298 | INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); | 307 | INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); |
299 | 308 | ||
300 | io_req->xid = xid++; | 309 | io_req->xid = xid++; |
301 | if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) | 310 | if (i < num_pri_ios) |
302 | printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", | 311 | list_add_tail(&io_req->link, |
303 | io_req->xid); | 312 | &cmgr->free_list[io_req->xid % |
304 | list_add_tail(&io_req->link, | 313 | num_possible_cpus()]); |
305 | &cmgr->free_list[io_req->xid % num_possible_cpus()]); | 314 | else |
315 | list_add_tail(&io_req->link, | ||
316 | &cmgr->free_list[num_possible_cpus()]); | ||
306 | io_req++; | 317 | io_req++; |
307 | } | 318 | } |
308 | 319 | ||
@@ -389,7 +400,7 @@ free_cmd_pool: | |||
389 | if (!cmgr->free_list) | 400 | if (!cmgr->free_list) |
390 | goto free_cmgr; | 401 | goto free_cmgr; |
391 | 402 | ||
392 | for (i = 0; i < num_possible_cpus(); i++) { | 403 | for (i = 0; i < num_possible_cpus() + 1; i++) { |
393 | struct list_head *list; | 404 | struct list_head *list; |
394 | struct list_head *tmp; | 405 | struct list_head *tmp; |
395 | 406 | ||
@@ -413,6 +424,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
413 | struct bnx2fc_cmd *io_req; | 424 | struct bnx2fc_cmd *io_req; |
414 | struct list_head *listp; | 425 | struct list_head *listp; |
415 | struct io_bdt *bd_tbl; | 426 | struct io_bdt *bd_tbl; |
427 | int index = RESERVE_FREE_LIST_INDEX; | ||
416 | u32 max_sqes; | 428 | u32 max_sqes; |
417 | u16 xid; | 429 | u16 xid; |
418 | 430 | ||
@@ -432,26 +444,26 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
432 | * NOTE: Free list insertions and deletions are protected with | 444 | * NOTE: Free list insertions and deletions are protected with |
433 | * cmgr lock | 445 | * cmgr lock |
434 | */ | 446 | */ |
435 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 447 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
436 | if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || | 448 | if ((list_empty(&(cmd_mgr->free_list[index]))) || |
437 | (tgt->num_active_ios.counter >= max_sqes)) { | 449 | (tgt->num_active_ios.counter >= max_sqes)) { |
438 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " | 450 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " |
439 | "ios(%d):sqes(%d)\n", | 451 | "ios(%d):sqes(%d)\n", |
440 | tgt->num_active_ios.counter, tgt->max_sqes); | 452 | tgt->num_active_ios.counter, tgt->max_sqes); |
441 | if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) | 453 | if (list_empty(&(cmd_mgr->free_list[index]))) |
442 | printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); | 454 | printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); |
443 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 455 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
444 | return NULL; | 456 | return NULL; |
445 | } | 457 | } |
446 | 458 | ||
447 | listp = (struct list_head *) | 459 | listp = (struct list_head *) |
448 | cmd_mgr->free_list[smp_processor_id()].next; | 460 | cmd_mgr->free_list[index].next; |
449 | list_del_init(listp); | 461 | list_del_init(listp); |
450 | io_req = (struct bnx2fc_cmd *) listp; | 462 | io_req = (struct bnx2fc_cmd *) listp; |
451 | xid = io_req->xid; | 463 | xid = io_req->xid; |
452 | cmd_mgr->cmds[xid] = io_req; | 464 | cmd_mgr->cmds[xid] = io_req; |
453 | atomic_inc(&tgt->num_active_ios); | 465 | atomic_inc(&tgt->num_active_ios); |
454 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 466 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
455 | 467 | ||
456 | INIT_LIST_HEAD(&io_req->link); | 468 | INIT_LIST_HEAD(&io_req->link); |
457 | 469 | ||
@@ -479,27 +491,30 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) | |||
479 | struct io_bdt *bd_tbl; | 491 | struct io_bdt *bd_tbl; |
480 | u32 max_sqes; | 492 | u32 max_sqes; |
481 | u16 xid; | 493 | u16 xid; |
494 | int index = get_cpu(); | ||
482 | 495 | ||
483 | max_sqes = BNX2FC_SCSI_MAX_SQES; | 496 | max_sqes = BNX2FC_SCSI_MAX_SQES; |
484 | /* | 497 | /* |
485 | * NOTE: Free list insertions and deletions are protected with | 498 | * NOTE: Free list insertions and deletions are protected with |
486 | * cmgr lock | 499 | * cmgr lock |
487 | */ | 500 | */ |
488 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 501 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
489 | if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || | 502 | if ((list_empty(&cmd_mgr->free_list[index])) || |
490 | (tgt->num_active_ios.counter >= max_sqes)) { | 503 | (tgt->num_active_ios.counter >= max_sqes)) { |
491 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 504 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
505 | put_cpu(); | ||
492 | return NULL; | 506 | return NULL; |
493 | } | 507 | } |
494 | 508 | ||
495 | listp = (struct list_head *) | 509 | listp = (struct list_head *) |
496 | cmd_mgr->free_list[smp_processor_id()].next; | 510 | cmd_mgr->free_list[index].next; |
497 | list_del_init(listp); | 511 | list_del_init(listp); |
498 | io_req = (struct bnx2fc_cmd *) listp; | 512 | io_req = (struct bnx2fc_cmd *) listp; |
499 | xid = io_req->xid; | 513 | xid = io_req->xid; |
500 | cmd_mgr->cmds[xid] = io_req; | 514 | cmd_mgr->cmds[xid] = io_req; |
501 | atomic_inc(&tgt->num_active_ios); | 515 | atomic_inc(&tgt->num_active_ios); |
502 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 516 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
517 | put_cpu(); | ||
503 | 518 | ||
504 | INIT_LIST_HEAD(&io_req->link); | 519 | INIT_LIST_HEAD(&io_req->link); |
505 | 520 | ||
@@ -522,8 +537,15 @@ void bnx2fc_cmd_release(struct kref *ref) | |||
522 | struct bnx2fc_cmd *io_req = container_of(ref, | 537 | struct bnx2fc_cmd *io_req = container_of(ref, |
523 | struct bnx2fc_cmd, refcount); | 538 | struct bnx2fc_cmd, refcount); |
524 | struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; | 539 | struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; |
540 | int index; | ||
541 | |||
542 | if (io_req->cmd_type == BNX2FC_SCSI_CMD) | ||
543 | index = io_req->xid % num_possible_cpus(); | ||
544 | else | ||
545 | index = RESERVE_FREE_LIST_INDEX; | ||
525 | 546 | ||
526 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 547 | |
548 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); | ||
527 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) | 549 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) |
528 | bnx2fc_free_mp_resc(io_req); | 550 | bnx2fc_free_mp_resc(io_req); |
529 | cmd_mgr->cmds[io_req->xid] = NULL; | 551 | cmd_mgr->cmds[io_req->xid] = NULL; |
@@ -531,9 +553,10 @@ void bnx2fc_cmd_release(struct kref *ref) | |||
531 | list_del_init(&io_req->link); | 553 | list_del_init(&io_req->link); |
532 | /* Add it to the free list */ | 554 | /* Add it to the free list */ |
533 | list_add(&io_req->link, | 555 | list_add(&io_req->link, |
534 | &cmd_mgr->free_list[smp_processor_id()]); | 556 | &cmd_mgr->free_list[index]); |
535 | atomic_dec(&io_req->tgt->num_active_ios); | 557 | atomic_dec(&io_req->tgt->num_active_ios); |
536 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 558 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
559 | |||
537 | } | 560 | } |
538 | 561 | ||
539 | static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) | 562 | static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) |
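The command-manager changes above add one list slot past the per-CPU free lists and reserve it for slow-path (ELS/TM) requests; regular SCSI commands keep hashing by XID across the per-CPU lists, and bnx2fc_cmd_release() now picks the list from the command type instead of smp_processor_id(). A toy sketch of that index selection with an assumed CPU count (not driver code):

#include <stdio.h>

enum cmd_type { SCSI_CMD, ELS_CMD, TM_CMD };

/* Mirrors the choice between xid % ncpus and the reserve list at index ncpus. */
static int free_list_index(unsigned int xid, enum cmd_type type, int ncpus)
{
	if (type == SCSI_CMD)
		return xid % ncpus;   /* per-CPU free list */
	return ncpus;                 /* reserve list for slow-path requests */
}

int main(void)
{
	int ncpus = 4;                /* assumed CPU count for the example */
	printf("xid 0x123 SCSI -> list %d\n", free_list_index(0x123, SCSI_CMD, ncpus));
	printf("xid 0x801 ELS  -> list %d\n", free_list_index(0x801, ELS_CMD, ncpus));
	return 0;
}

Releasing by xid rather than by the current CPU returns each SCSI command to the xid-hashed home list it was seeded into by bnx2fc_cmd_mgr_alloc().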
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 7ea93af60260..7cc05e4e82d5 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
@@ -304,10 +304,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port, | |||
304 | " not sent to FW\n"); | 304 | " not sent to FW\n"); |
305 | 305 | ||
306 | /* Free session resources */ | 306 | /* Free session resources */ |
307 | spin_lock_bh(&tgt->cq_lock); | ||
308 | bnx2fc_free_session_resc(hba, tgt); | 307 | bnx2fc_free_session_resc(hba, tgt); |
309 | bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); | 308 | bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); |
310 | spin_unlock_bh(&tgt->cq_lock); | ||
311 | } | 309 | } |
312 | 310 | ||
313 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | 311 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, |
@@ -830,11 +828,13 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, | |||
830 | tgt->rq = NULL; | 828 | tgt->rq = NULL; |
831 | } | 829 | } |
832 | /* Free CQ */ | 830 | /* Free CQ */ |
831 | spin_lock_bh(&tgt->cq_lock); | ||
833 | if (tgt->cq) { | 832 | if (tgt->cq) { |
834 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 833 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
835 | tgt->cq, tgt->cq_dma); | 834 | tgt->cq, tgt->cq_dma); |
836 | tgt->cq = NULL; | 835 | tgt->cq = NULL; |
837 | } | 836 | } |
837 | spin_unlock_bh(&tgt->cq_lock); | ||
838 | /* Free SQ */ | 838 | /* Free SQ */ |
839 | if (tgt->sq) { | 839 | if (tgt->sq) { |
840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 8eeb39ffa37f..e98ae33f1295 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -132,14 +132,25 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) | |||
132 | if (page_count(sg_page(sg)) >= 1 && !recv) | 132 | if (page_count(sg_page(sg)) >= 1 && !recv) |
133 | return; | 133 | return; |
134 | 134 | ||
135 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | 135 | if (recv) { |
136 | segment->atomic_mapped = true; | ||
137 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | ||
138 | } else { | ||
139 | segment->atomic_mapped = false; | ||
140 | /* the xmit path can sleep with the page mapped so use kmap */ | ||
141 | segment->sg_mapped = kmap(sg_page(sg)); | ||
142 | } | ||
143 | |||
136 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; | 144 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; |
137 | } | 145 | } |
138 | 146 | ||
139 | void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) | 147 | void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) |
140 | { | 148 | { |
141 | if (segment->sg_mapped) { | 149 | if (segment->sg_mapped) { |
142 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | 150 | if (segment->atomic_mapped) |
151 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | ||
152 | else | ||
153 | kunmap(sg_page(segment->sg)); | ||
143 | segment->sg_mapped = NULL; | 154 | segment->sg_mapped = NULL; |
144 | segment->data = NULL; | 155 | segment->data = NULL; |
145 | } | 156 | } |
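The libiscsi_tcp hunk above makes the unmap path match the map path: the receive side runs in softirq context and keeps using kmap_atomic(), while the transmit side may sleep with the page mapped and now uses kmap(), with the new atomic_mapped flag remembering which primitive was used. A userspace stand-in for that bookkeeping (string labels instead of the real kernel mapping API):

#include <stdbool.h>
#include <stdio.h>

struct segment {
	bool atomic_mapped;
	const char *mapping;   /* stands in for the kmap/kmap_atomic cookie */
};

static void segment_map(struct segment *seg, bool recv)
{
	if (recv) {
		seg->atomic_mapped = true;
		seg->mapping = "kmap_atomic";  /* caller must not sleep while held */
	} else {
		seg->atomic_mapped = false;
		seg->mapping = "kmap";         /* xmit path may sleep while mapped */
	}
}

static void segment_unmap(struct segment *seg)
{
	if (!seg->mapping)
		return;
	/* pick the unmap that matches how the page was mapped */
	printf("unmap via %s\n", seg->atomic_mapped ? "kunmap_atomic" : "kunmap");
	seg->mapping = NULL;
}

int main(void)
{
	struct segment rx = { false, NULL }, tx = { false, NULL };
	segment_map(&rx, true);
	segment_map(&tx, false);
	segment_unmap(&rx);
	segment_unmap(&tx);
	return 0;
}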
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 14de249917f8..88928f00aa2d 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | #/******************************************************************* | 1 | #/******************************************************************* |
2 | # * This file is part of the Emulex Linux Device Driver for * | 2 | # * This file is part of the Emulex Linux Device Driver for * |
3 | # * Fibre Channel Host Bus Adapters. * | 3 | # * Fibre Channel Host Bus Adapters. * |
4 | # * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | # * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | # * EMULEX and SLI are trademarks of Emulex. * | 5 | # * EMULEX and SLI are trademarks of Emulex. * |
6 | # * www.emulex.com * | 6 | # * www.emulex.com * |
7 | # * * | 7 | # * * |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b64c6da870d3..60e98a62f308 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -539,6 +539,8 @@ struct lpfc_hba { | |||
539 | (struct lpfc_hba *, uint32_t); | 539 | (struct lpfc_hba *, uint32_t); |
540 | int (*lpfc_hba_down_link) | 540 | int (*lpfc_hba_down_link) |
541 | (struct lpfc_hba *, uint32_t); | 541 | (struct lpfc_hba *, uint32_t); |
542 | int (*lpfc_selective_reset) | ||
543 | (struct lpfc_hba *); | ||
542 | 544 | ||
543 | /* SLI4 specific HBA data structure */ | 545 | /* SLI4 specific HBA data structure */ |
544 | struct lpfc_sli4_hba sli4_hba; | 546 | struct lpfc_sli4_hba sli4_hba; |
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba) | |||
895 | return; | 897 | return; |
896 | } | 898 | } |
897 | 899 | ||
898 | static inline void | 900 | static inline int |
901 | lpfc_readl(void __iomem *addr, uint32_t *data) | ||
902 | { | ||
903 | uint32_t temp; | ||
904 | temp = readl(addr); | ||
905 | if (temp == 0xffffffff) | ||
906 | return -EIO; | ||
907 | *data = temp; | ||
908 | return 0; | ||
909 | } | ||
910 | |||
911 | static inline int | ||
899 | lpfc_sli_read_hs(struct lpfc_hba *phba) | 912 | lpfc_sli_read_hs(struct lpfc_hba *phba) |
900 | { | 913 | { |
901 | /* | 914 | /* |
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) | |||
904 | */ | 917 | */ |
905 | phba->sli.slistat.err_attn_event++; | 918 | phba->sli.slistat.err_attn_event++; |
906 | 919 | ||
907 | /* Save status info */ | 920 | /* Save status info and check for unplug error */ |
908 | phba->work_hs = readl(phba->HSregaddr); | 921 | if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || |
909 | phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); | 922 | lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || |
910 | phba->work_status[1] = readl(phba->MBslimaddr + 0xac); | 923 | lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { |
924 | return -EIO; | ||
925 | } | ||
911 | 926 | ||
912 | /* Clear chip Host Attention error bit */ | 927 | /* Clear chip Host Attention error bit */ |
913 | writel(HA_ERATT, phba->HAregaddr); | 928 | writel(HA_ERATT, phba->HAregaddr); |
914 | readl(phba->HAregaddr); /* flush */ | 929 | readl(phba->HAregaddr); /* flush */ |
915 | phba->pport->stopped = 1; | 930 | phba->pport->stopped = 1; |
916 | 931 | ||
917 | return; | 932 | return 0; |
918 | } | 933 | } |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e7c020df12fa..4e0faa00b96f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) | |||
685 | * -EIO reset not configured or error posting the event | 685 | * -EIO reset not configured or error posting the event |
686 | * zero for success | 686 | * zero for success |
687 | **/ | 687 | **/ |
688 | static int | 688 | int |
689 | lpfc_selective_reset(struct lpfc_hba *phba) | 689 | lpfc_selective_reset(struct lpfc_hba *phba) |
690 | { | 690 | { |
691 | struct completion online_compl; | 691 | struct completion online_compl; |
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, | |||
746 | int status = -EINVAL; | 746 | int status = -EINVAL; |
747 | 747 | ||
748 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) | 748 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) |
749 | status = lpfc_selective_reset(phba); | 749 | status = phba->lpfc_selective_reset(phba); |
750 | 750 | ||
751 | if (status == 0) | 751 | if (status == 0) |
752 | return strlen(buf); | 752 | return strlen(buf); |
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
1224 | if (val & ENABLE_FCP_RING_POLLING) { | 1224 | if (val & ENABLE_FCP_RING_POLLING) { |
1225 | if ((val & DISABLE_FCP_RING_INT) && | 1225 | if ((val & DISABLE_FCP_RING_INT) && |
1226 | !(old_val & DISABLE_FCP_RING_INT)) { | 1226 | !(old_val & DISABLE_FCP_RING_INT)) { |
1227 | creg_val = readl(phba->HCregaddr); | 1227 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1228 | spin_unlock_irq(&phba->hbalock); | ||
1229 | return -EINVAL; | ||
1230 | } | ||
1228 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 1231 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
1229 | writel(creg_val, phba->HCregaddr); | 1232 | writel(creg_val, phba->HCregaddr); |
1230 | readl(phba->HCregaddr); /* flush */ | 1233 | readl(phba->HCregaddr); /* flush */ |
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
1242 | spin_unlock_irq(&phba->hbalock); | 1245 | spin_unlock_irq(&phba->hbalock); |
1243 | del_timer(&phba->fcp_poll_timer); | 1246 | del_timer(&phba->fcp_poll_timer); |
1244 | spin_lock_irq(&phba->hbalock); | 1247 | spin_lock_irq(&phba->hbalock); |
1245 | creg_val = readl(phba->HCregaddr); | 1248 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1249 | spin_unlock_irq(&phba->hbalock); | ||
1250 | return -EINVAL; | ||
1251 | } | ||
1246 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 1252 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
1247 | writel(creg_val, phba->HCregaddr); | 1253 | writel(creg_val, phba->HCregaddr); |
1248 | readl(phba->HCregaddr); /* flush */ | 1254 | readl(phba->HCregaddr); /* flush */ |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 0dd43bb91618..793b9f1131fb 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
348 | dd_data->context_un.iocb.bmp = bmp; | 348 | dd_data->context_un.iocb.bmp = bmp; |
349 | 349 | ||
350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
351 | creg_val = readl(phba->HCregaddr); | 351 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
352 | rc = -EIO ; | ||
353 | goto free_cmdiocbq; | ||
354 | } | ||
352 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 355 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
353 | writel(creg_val, phba->HCregaddr); | 356 | writel(creg_val, phba->HCregaddr); |
354 | readl(phba->HCregaddr); /* flush */ | 357 | readl(phba->HCregaddr); /* flush */ |
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
599 | dd_data->context_un.iocb.ndlp = ndlp; | 602 | dd_data->context_un.iocb.ndlp = ndlp; |
600 | 603 | ||
601 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 604 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
602 | creg_val = readl(phba->HCregaddr); | 605 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
606 | rc = -EIO; | ||
607 | goto linkdown_err; | ||
608 | } | ||
603 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 609 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
604 | writel(creg_val, phba->HCregaddr); | 610 | writel(creg_val, phba->HCregaddr); |
605 | readl(phba->HCregaddr); /* flush */ | 611 | readl(phba->HCregaddr); /* flush */ |
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
613 | else | 619 | else |
614 | rc = -EIO; | 620 | rc = -EIO; |
615 | 621 | ||
622 | linkdown_err: | ||
616 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 623 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
617 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 624 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
618 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 625 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | |||
1357 | dd_data->context_un.iocb.ndlp = ndlp; | 1364 | dd_data->context_un.iocb.ndlp = ndlp; |
1358 | 1365 | ||
1359 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 1366 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
1360 | creg_val = readl(phba->HCregaddr); | 1367 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1368 | rc = -IOCB_ERROR; | ||
1369 | goto issue_ct_rsp_exit; | ||
1370 | } | ||
1361 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 1371 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
1362 | writel(creg_val, phba->HCregaddr); | 1372 | writel(creg_val, phba->HCregaddr); |
1363 | readl(phba->HCregaddr); /* flush */ | 1373 | readl(phba->HCregaddr); /* flush */ |
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | |||
2479 | 2489 | ||
2480 | from = (uint8_t *)dd_data->context_un.mbox.mb; | 2490 | from = (uint8_t *)dd_data->context_un.mbox.mb; |
2481 | job = dd_data->context_un.mbox.set_job; | 2491 | job = dd_data->context_un.mbox.set_job; |
2482 | size = job->reply_payload.payload_len; | 2492 | if (job) { |
2483 | job->reply->reply_payload_rcv_len = | 2493 | size = job->reply_payload.payload_len; |
2484 | sg_copy_from_buffer(job->reply_payload.sg_list, | 2494 | job->reply->reply_payload_rcv_len = |
2485 | job->reply_payload.sg_cnt, | 2495 | sg_copy_from_buffer(job->reply_payload.sg_list, |
2486 | from, size); | 2496 | job->reply_payload.sg_cnt, |
2487 | job->reply->result = 0; | 2497 | from, size); |
2498 | job->reply->result = 0; | ||
2488 | 2499 | ||
2500 | job->dd_data = NULL; | ||
2501 | job->job_done(job); | ||
2502 | } | ||
2489 | dd_data->context_un.mbox.set_job = NULL; | 2503 | dd_data->context_un.mbox.set_job = NULL; |
2490 | job->dd_data = NULL; | ||
2491 | job->job_done(job); | ||
2492 | /* need to hold the lock until we call job done to hold off | 2504 | /* need to hold the lock until we call job done to hold off |
2493 | * the timeout handler returning to the midlayer while | 2505 | * the timeout handler returning to the midlayer while |
2494 | * we are still processing the job | 2506 | * we are still processing the job |
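The change repeated across lpfc_bsg.c above, and in most of the hunks that follow, swaps bare readl() for lpfc_readl(), so a register read can report failure instead of handing back the all-ones pattern a surprise-removed PCI device returns. The helper itself is defined outside this diff; a minimal sketch of its assumed shape, inferred only from the call sites above (an illustration, not the driver's exact code):

	/* Assumed helper: fail the read when the adapter has been hot-unplugged.
	 * A missing PCI device answers every MMIO read with 0xffffffff, so that
	 * value is taken to mean "device gone" rather than register contents.
	 */
	static inline int
	lpfc_readl(void __iomem *addr, uint32_t *data)
	{
		uint32_t val = readl(addr);

		if (val == 0xffffffff)
			return -EIO;
		*data = val;
		return 0;
	}

Each caller then maps the failure onto its own error convention (-EIO, -IOCB_ERROR, a linkdown/cleanup label), as the three hunks above show.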
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 3d40023f4804..f0b332f4eedb 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); | |||
254 | void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, | 254 | void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, |
255 | uint32_t); | 255 | uint32_t); |
256 | void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); | 256 | void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); |
257 | 257 | int lpfc_selective_reset(struct lpfc_hba *); | |
258 | void lpfc_reset_barrier(struct lpfc_hba * phba); | 258 | void lpfc_reset_barrier(struct lpfc_hba *); |
259 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); | 259 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); |
260 | int lpfc_sli_brdkill(struct lpfc_hba *); | 260 | int lpfc_sli_brdkill(struct lpfc_hba *); |
261 | int lpfc_sli_brdreset(struct lpfc_hba *); | 261 | int lpfc_sli_brdreset(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8e28edf9801e..735028fedda5 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) | |||
89 | return 0; | 89 | return 0; |
90 | 90 | ||
91 | /* Read the HBA Host Attention Register */ | 91 | /* Read the HBA Host Attention Register */ |
92 | ha_copy = readl(phba->HAregaddr); | 92 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
93 | return 1; | ||
93 | 94 | ||
94 | if (!(ha_copy & HA_LATT)) | 95 | if (!(ha_copy & HA_LATT)) |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 94ae37c5111a..95f11ed79463 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */ | |||
1344 | #define HS_FFER1 0x80000000 /* Bit 31 */ | 1344 | #define HS_FFER1 0x80000000 /* Bit 31 */ |
1345 | #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ | 1345 | #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ |
1346 | #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ | 1346 | #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ |
1347 | 1347 | #define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ | |
1348 | /* Host Control Register */ | 1348 | /* Host Control Register */ |
1349 | 1349 | ||
1350 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ | 1350 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ |
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 { | |||
1713 | #define pde6_apptagval_WORD word2 | 1713 | #define pde6_apptagval_WORD word2 |
1714 | }; | 1714 | }; |
1715 | 1715 | ||
1716 | struct lpfc_pde7 { | ||
1717 | uint32_t word0; | ||
1718 | #define pde7_type_SHIFT 24 | ||
1719 | #define pde7_type_MASK 0x000000ff | ||
1720 | #define pde7_type_WORD word0 | ||
1721 | #define pde7_rsvd0_SHIFT 0 | ||
1722 | #define pde7_rsvd0_MASK 0x00ffffff | ||
1723 | #define pde7_rsvd0_WORD word0 | ||
1724 | uint32_t addrHigh; | ||
1725 | uint32_t addrLow; | ||
1726 | }; | ||
1716 | 1727 | ||
1717 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ | 1728 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ |
1718 | 1729 | ||
@@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */ | |||
3621 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ | 3632 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ |
3622 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ | 3633 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ |
3623 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ | 3634 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ |
3624 | struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ | 3635 | struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ |
3625 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ | 3636 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ |
3626 | } un; | 3637 | } un; |
3627 | union { | 3638 | union { |
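The new struct lpfc_pde7 and the UNPLUG_ERR flag above follow the driver's usual bit-field convention: every sub-field of a 32-bit word is described by a _SHIFT/_MASK/_WORD triplet and accessed through the bf_set()/bf_get() macros from lpfc_hw4.h. Those macros are not part of this diff; paraphrased here for context (assumed to match the in-tree definitions):

	/* Paraphrase of the lpfc bit-field accessors, not part of this patch.
	 * Given a field "name", name##_WORD names the struct member and
	 * name##_SHIFT/name##_MASK place the value inside it, e.g.
	 *	bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
	 */
	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

This is why the lpfc_hw4.h changes below can add version-specific fields (rsn_code, mbox_hdr_version, cq_eq_id_2, and so on) without altering any struct layout: only new SHIFT/MASK/WORD triplets over existing words.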
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index c7178d60c7bf..8433ac0d9fb4 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags { | |||
215 | #define lpfc_fip_flag_WORD word0 | 215 | #define lpfc_fip_flag_WORD word0 |
216 | }; | 216 | }; |
217 | 217 | ||
218 | struct sli4_bls_acc { | 218 | struct sli4_bls_rsp { |
219 | uint32_t word0_rsvd; /* Word0 must be reserved */ | 219 | uint32_t word0_rsvd; /* Word0 must be reserved */ |
220 | uint32_t word1; | 220 | uint32_t word1; |
221 | #define lpfc_abts_orig_SHIFT 0 | 221 | #define lpfc_abts_orig_SHIFT 0 |
@@ -231,6 +231,16 @@ struct sli4_bls_acc { | |||
231 | #define lpfc_abts_oxid_MASK 0x0000FFFF | 231 | #define lpfc_abts_oxid_MASK 0x0000FFFF |
232 | #define lpfc_abts_oxid_WORD word2 | 232 | #define lpfc_abts_oxid_WORD word2 |
233 | uint32_t word3; | 233 | uint32_t word3; |
234 | #define lpfc_vndr_code_SHIFT 0 | ||
235 | #define lpfc_vndr_code_MASK 0x000000FF | ||
236 | #define lpfc_vndr_code_WORD word3 | ||
237 | #define lpfc_rsn_expln_SHIFT 8 | ||
238 | #define lpfc_rsn_expln_MASK 0x000000FF | ||
239 | #define lpfc_rsn_expln_WORD word3 | ||
240 | #define lpfc_rsn_code_SHIFT 16 | ||
241 | #define lpfc_rsn_code_MASK 0x000000FF | ||
242 | #define lpfc_rsn_code_WORD word3 | ||
243 | |||
234 | uint32_t word4; | 244 | uint32_t word4; |
235 | uint32_t word5_rsvd; /* Word5 must be reserved */ | 245 | uint32_t word5_rsvd; /* Word5 must be reserved */ |
236 | }; | 246 | }; |
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr { | |||
711 | union lpfc_sli4_cfg_shdr { | 721 | union lpfc_sli4_cfg_shdr { |
712 | struct { | 722 | struct { |
713 | uint32_t word6; | 723 | uint32_t word6; |
714 | #define lpfc_mbox_hdr_opcode_SHIFT 0 | 724 | #define lpfc_mbox_hdr_opcode_SHIFT 0 |
715 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF | 725 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF |
716 | #define lpfc_mbox_hdr_opcode_WORD word6 | 726 | #define lpfc_mbox_hdr_opcode_WORD word6 |
717 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 | 727 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 |
718 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF | 728 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF |
719 | #define lpfc_mbox_hdr_subsystem_WORD word6 | 729 | #define lpfc_mbox_hdr_subsystem_WORD word6 |
720 | #define lpfc_mbox_hdr_port_number_SHIFT 16 | 730 | #define lpfc_mbox_hdr_port_number_SHIFT 16 |
721 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF | 731 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF |
722 | #define lpfc_mbox_hdr_port_number_WORD word6 | 732 | #define lpfc_mbox_hdr_port_number_WORD word6 |
723 | #define lpfc_mbox_hdr_domain_SHIFT 24 | 733 | #define lpfc_mbox_hdr_domain_SHIFT 24 |
724 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF | 734 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF |
725 | #define lpfc_mbox_hdr_domain_WORD word6 | 735 | #define lpfc_mbox_hdr_domain_WORD word6 |
726 | uint32_t timeout; | 736 | uint32_t timeout; |
727 | uint32_t request_length; | 737 | uint32_t request_length; |
728 | uint32_t reserved9; | 738 | uint32_t word9; |
739 | #define lpfc_mbox_hdr_version_SHIFT 0 | ||
740 | #define lpfc_mbox_hdr_version_MASK 0x000000FF | ||
741 | #define lpfc_mbox_hdr_version_WORD word9 | ||
742 | #define LPFC_Q_CREATE_VERSION_2 2 | ||
743 | #define LPFC_Q_CREATE_VERSION_1 1 | ||
744 | #define LPFC_Q_CREATE_VERSION_0 0 | ||
729 | } request; | 745 | } request; |
730 | struct { | 746 | struct { |
731 | uint32_t word6; | 747 | uint32_t word6; |
@@ -917,9 +933,12 @@ struct cq_context { | |||
917 | #define LPFC_CQ_CNT_512 0x1 | 933 | #define LPFC_CQ_CNT_512 0x1 |
918 | #define LPFC_CQ_CNT_1024 0x2 | 934 | #define LPFC_CQ_CNT_1024 0x2 |
919 | uint32_t word1; | 935 | uint32_t word1; |
920 | #define lpfc_cq_eq_id_SHIFT 22 | 936 | #define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ |
921 | #define lpfc_cq_eq_id_MASK 0x000000FF | 937 | #define lpfc_cq_eq_id_MASK 0x000000FF |
922 | #define lpfc_cq_eq_id_WORD word1 | 938 | #define lpfc_cq_eq_id_WORD word1 |
939 | #define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ | ||
940 | #define lpfc_cq_eq_id_2_MASK 0x0000FFFF | ||
941 | #define lpfc_cq_eq_id_2_WORD word1 | ||
923 | uint32_t reserved0; | 942 | uint32_t reserved0; |
924 | uint32_t reserved1; | 943 | uint32_t reserved1; |
925 | }; | 944 | }; |
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create { | |||
929 | union { | 948 | union { |
930 | struct { | 949 | struct { |
931 | uint32_t word0; | 950 | uint32_t word0; |
951 | #define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ | ||
952 | #define lpfc_mbx_cq_create_page_size_MASK 0x000000FF | ||
953 | #define lpfc_mbx_cq_create_page_size_WORD word0 | ||
932 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 | 954 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 |
933 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF | 955 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF |
934 | #define lpfc_mbx_cq_create_num_pages_WORD word0 | 956 | #define lpfc_mbx_cq_create_num_pages_WORD word0 |
@@ -969,7 +991,7 @@ struct wq_context { | |||
969 | struct lpfc_mbx_wq_create { | 991 | struct lpfc_mbx_wq_create { |
970 | struct mbox_header header; | 992 | struct mbox_header header; |
971 | union { | 993 | union { |
972 | struct { | 994 | struct { /* Version 0 Request */ |
973 | uint32_t word0; | 995 | uint32_t word0; |
974 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 | 996 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 |
975 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF | 997 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF |
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create { | |||
979 | #define lpfc_mbx_wq_create_cq_id_WORD word0 | 1001 | #define lpfc_mbx_wq_create_cq_id_WORD word0 |
980 | struct dma_address page[LPFC_MAX_WQ_PAGE]; | 1002 | struct dma_address page[LPFC_MAX_WQ_PAGE]; |
981 | } request; | 1003 | } request; |
1004 | struct { /* Version 1 Request */ | ||
1005 | uint32_t word0; /* Word 0 is the same as in v0 */ | ||
1006 | uint32_t word1; | ||
1007 | #define lpfc_mbx_wq_create_page_size_SHIFT 0 | ||
1008 | #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF | ||
1009 | #define lpfc_mbx_wq_create_page_size_WORD word1 | ||
1010 | #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 | ||
1011 | #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F | ||
1012 | #define lpfc_mbx_wq_create_wqe_size_WORD word1 | ||
1013 | #define LPFC_WQ_WQE_SIZE_64 0x5 | ||
1014 | #define LPFC_WQ_WQE_SIZE_128 0x6 | ||
1015 | #define lpfc_mbx_wq_create_wqe_count_SHIFT 16 | ||
1016 | #define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF | ||
1017 | #define lpfc_mbx_wq_create_wqe_count_WORD word1 | ||
1018 | uint32_t word2; | ||
1019 | struct dma_address page[LPFC_MAX_WQ_PAGE-1]; | ||
1020 | } request_1; | ||
982 | struct { | 1021 | struct { |
983 | uint32_t word0; | 1022 | uint32_t word0; |
984 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 | 1023 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 |
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy { | |||
1007 | #define LPFC_DATA_BUF_SIZE 2048 | 1046 | #define LPFC_DATA_BUF_SIZE 2048 |
1008 | struct rq_context { | 1047 | struct rq_context { |
1009 | uint32_t word0; | 1048 | uint32_t word0; |
1010 | #define lpfc_rq_context_rq_size_SHIFT 16 | 1049 | #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ |
1011 | #define lpfc_rq_context_rq_size_MASK 0x0000000F | 1050 | #define lpfc_rq_context_rqe_count_MASK 0x0000000F |
1012 | #define lpfc_rq_context_rq_size_WORD word0 | 1051 | #define lpfc_rq_context_rqe_count_WORD word0 |
1013 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ | 1052 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ |
1014 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ | 1053 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ |
1015 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ | 1054 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ |
1016 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ | 1055 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ |
1056 | #define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ | ||
1057 | #define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF | ||
1058 | #define lpfc_rq_context_rqe_count_1_WORD word0 | ||
1059 | #define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ | ||
1060 | #define lpfc_rq_context_rqe_size_MASK 0x0000000F | ||
1061 | #define lpfc_rq_context_rqe_size_WORD word0 | ||
1062 | #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ | ||
1063 | #define lpfc_rq_context_page_size_MASK 0x000000FF | ||
1064 | #define lpfc_rq_context_page_size_WORD word0 | ||
1017 | uint32_t reserved1; | 1065 | uint32_t reserved1; |
1018 | uint32_t word2; | 1066 | uint32_t word2; |
1019 | #define lpfc_rq_context_cq_id_SHIFT 16 | 1067 | #define lpfc_rq_context_cq_id_SHIFT 16 |
@@ -1022,7 +1070,7 @@ struct rq_context { | |||
1022 | #define lpfc_rq_context_buf_size_SHIFT 0 | 1070 | #define lpfc_rq_context_buf_size_SHIFT 0 |
1023 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF | 1071 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF |
1024 | #define lpfc_rq_context_buf_size_WORD word2 | 1072 | #define lpfc_rq_context_buf_size_WORD word2 |
1025 | uint32_t reserved3; | 1073 | uint32_t buffer_size; /* Version 1 Only */ |
1026 | }; | 1074 | }; |
1027 | 1075 | ||
1028 | struct lpfc_mbx_rq_create { | 1076 | struct lpfc_mbx_rq_create { |
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy { | |||
1062 | 1110 | ||
1063 | struct mq_context { | 1111 | struct mq_context { |
1064 | uint32_t word0; | 1112 | uint32_t word0; |
1065 | #define lpfc_mq_context_cq_id_SHIFT 22 | 1113 | #define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ |
1066 | #define lpfc_mq_context_cq_id_MASK 0x000003FF | 1114 | #define lpfc_mq_context_cq_id_MASK 0x000003FF |
1067 | #define lpfc_mq_context_cq_id_WORD word0 | 1115 | #define lpfc_mq_context_cq_id_WORD word0 |
1068 | #define lpfc_mq_context_count_SHIFT 16 | 1116 | #define lpfc_mq_context_ring_size_SHIFT 16 |
1069 | #define lpfc_mq_context_count_MASK 0x0000000F | 1117 | #define lpfc_mq_context_ring_size_MASK 0x0000000F |
1070 | #define lpfc_mq_context_count_WORD word0 | 1118 | #define lpfc_mq_context_ring_size_WORD word0 |
1071 | #define LPFC_MQ_CNT_16 0x5 | 1119 | #define LPFC_MQ_RING_SIZE_16 0x5 |
1072 | #define LPFC_MQ_CNT_32 0x6 | 1120 | #define LPFC_MQ_RING_SIZE_32 0x6 |
1073 | #define LPFC_MQ_CNT_64 0x7 | 1121 | #define LPFC_MQ_RING_SIZE_64 0x7 |
1074 | #define LPFC_MQ_CNT_128 0x8 | 1122 | #define LPFC_MQ_RING_SIZE_128 0x8 |
1075 | uint32_t word1; | 1123 | uint32_t word1; |
1076 | #define lpfc_mq_context_valid_SHIFT 31 | 1124 | #define lpfc_mq_context_valid_SHIFT 31 |
1077 | #define lpfc_mq_context_valid_MASK 0x00000001 | 1125 | #define lpfc_mq_context_valid_MASK 0x00000001 |
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext { | |||
1105 | union { | 1153 | union { |
1106 | struct { | 1154 | struct { |
1107 | uint32_t word0; | 1155 | uint32_t word0; |
1108 | #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 | 1156 | #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 |
1109 | #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF | 1157 | #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF |
1110 | #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 | 1158 | #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 |
1159 | #define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ | ||
1160 | #define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF | ||
1161 | #define lpfc_mbx_mq_create_ext_cq_id_WORD word0 | ||
1111 | uint32_t async_evt_bmap; | 1162 | uint32_t async_evt_bmap; |
1112 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK | 1163 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK |
1113 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 | 1164 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 |
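Word 9 of the SLI4 config header, previously reserved, now carries a version number (LPFC_Q_CREATE_VERSION_0/1/2), and the queue-create mailboxes above gain version-specific request layouts keyed off it. A condensed sketch of how a queue-create routine uses this; the full logic appears in the lpfc_cq_create() hunk at the end of this diff, and pc_sli4_params.cqv is assumed to hold the CQ version reported by the firmware:

	/* Sketch: stamp the header with the negotiated version, then fill
	 * whichever field layout that version defines.
	 */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* v2: explicit page size, 16-bit EQ id at bit 0 of word1 */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (PAGE_SIZE / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		/* v0: 8-bit EQ id at bit 22 of word1 */
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}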
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 35665cfb5689..e6ebe516cfbb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
507 | phba->hba_flag &= ~HBA_ERATT_HANDLED; | 507 | phba->hba_flag &= ~HBA_ERATT_HANDLED; |
508 | 508 | ||
509 | /* Enable appropriate host interrupts */ | 509 | /* Enable appropriate host interrupts */ |
510 | status = readl(phba->HCregaddr); | 510 | if (lpfc_readl(phba->HCregaddr, &status)) { |
511 | spin_unlock_irq(&phba->hbalock); | ||
512 | return -EIO; | ||
513 | } | ||
511 | status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; | 514 | status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; |
512 | if (psli->num_rings > 0) | 515 | if (psli->num_rings > 0) |
513 | status |= HC_R0INT_ENA; | 516 | status |= HC_R0INT_ENA; |
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) | |||
1222 | /* Wait for the ER1 bit to clear.*/ | 1225 | /* Wait for the ER1 bit to clear.*/ |
1223 | while (phba->work_hs & HS_FFER1) { | 1226 | while (phba->work_hs & HS_FFER1) { |
1224 | msleep(100); | 1227 | msleep(100); |
1225 | phba->work_hs = readl(phba->HSregaddr); | 1228 | if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { |
1229 | phba->work_hs = UNPLUG_ERR ; | ||
1230 | break; | ||
1231 | } | ||
1226 | /* If driver is unloading let the worker thread continue */ | 1232 | /* If driver is unloading let the worker thread continue */ |
1227 | if (phba->pport->load_flag & FC_UNLOADING) { | 1233 | if (phba->pport->load_flag & FC_UNLOADING) { |
1228 | phba->work_hs = 0; | 1234 | phba->work_hs = 0; |
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
4474 | { | 4480 | { |
4475 | phba->lpfc_hba_init_link = lpfc_hba_init_link; | 4481 | phba->lpfc_hba_init_link = lpfc_hba_init_link; |
4476 | phba->lpfc_hba_down_link = lpfc_hba_down_link; | 4482 | phba->lpfc_hba_down_link = lpfc_hba_down_link; |
4483 | phba->lpfc_selective_reset = lpfc_selective_reset; | ||
4477 | switch (dev_grp) { | 4484 | switch (dev_grp) { |
4478 | case LPFC_PCI_DEV_LP: | 4485 | case LPFC_PCI_DEV_LP: |
4479 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; | 4486 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; |
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5385 | int i, port_error = 0; | 5392 | int i, port_error = 0; |
5386 | uint32_t if_type; | 5393 | uint32_t if_type; |
5387 | 5394 | ||
5395 | memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); | ||
5396 | memset(&reg_data, 0, sizeof(reg_data)); | ||
5388 | if (!phba->sli4_hba.PSMPHRregaddr) | 5397 | if (!phba->sli4_hba.PSMPHRregaddr) |
5389 | return -ENODEV; | 5398 | return -ENODEV; |
5390 | 5399 | ||
5391 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ | 5400 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ |
5392 | for (i = 0; i < 3000; i++) { | 5401 | for (i = 0; i < 3000; i++) { |
5393 | portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); | 5402 | if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, |
5394 | if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { | 5403 | &portsmphr_reg.word0) || |
5404 | (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { | ||
5395 | /* Port has a fatal POST error, break out */ | 5405 | /* Port has a fatal POST error, break out */ |
5396 | port_error = -ENODEV; | 5406 | port_error = -ENODEV; |
5397 | break; | 5407 | break; |
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5472 | break; | 5482 | break; |
5473 | case LPFC_SLI_INTF_IF_TYPE_2: | 5483 | case LPFC_SLI_INTF_IF_TYPE_2: |
5474 | /* Final checks. The port status should be clean. */ | 5484 | /* Final checks. The port status should be clean. */ |
5475 | reg_data.word0 = | 5485 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
5476 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | 5486 | &reg_data.word0) ||
5477 | if (bf_get(lpfc_sliport_status_err, &reg_data)) { | 5487 | bf_get(lpfc_sliport_status_err, &reg_data)) {
5478 | phba->work_status[0] = | 5488 | phba->work_status[0] = |
5479 | readl(phba->sli4_hba.u.if_type2. | 5489 | readl(phba->sli4_hba.u.if_type2. |
5480 | ERR1regaddr); | 5490 | ERR1regaddr); |
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
6760 | * the loop again. | 6770 | * the loop again. |
6761 | */ | 6771 | */ |
6762 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { | 6772 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { |
6763 | reg_data.word0 = | 6773 | if (lpfc_readl(phba->sli4_hba.u.if_type2. |
6764 | readl(phba->sli4_hba.u.if_type2. | 6774 | STATUSregaddr, &reg_data.word0)) {
6765 | STATUSregaddr); | 6775 | rc = -ENODEV; |
6776 | break; | ||
6777 | } | ||
6766 | if (bf_get(lpfc_sliport_status_rdy, &reg_data)) | 6778 | if (bf_get(lpfc_sliport_status_rdy, &reg_data))
6767 | break; | 6779 | break; |
6768 | if (bf_get(lpfc_sliport_status_rn, &reg_data)) { | 6780 | if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
6783 | } | 6795 | } |
6784 | 6796 | ||
6785 | /* Detect any port errors. */ | 6797 | /* Detect any port errors. */ |
6786 | reg_data.word0 = readl(phba->sli4_hba.u.if_type2. | 6798 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
6787 | STATUSregaddr); | 6799 | &reg_data.word0)) {
6800 | rc = -ENODEV; | ||
6801 | break; | ||
6802 | } | ||
6788 | if ((bf_get(lpfc_sliport_status_err, &reg_data)) || | 6803 | if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
6789 | (rdy_chk >= 1000)) { | 6804 | (rdy_chk >= 1000)) { |
6790 | phba->work_status[0] = readl( | 6805 | phba->work_status[0] = readl( |
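The lpfc_pci_function_reset() hunk above shows the polling variant of the same hardening: each iteration first checks whether the status read itself failed and, if so, abandons the wait with -ENODEV instead of spinning on 0xffffffff. Reduced to a skeleton (field names as above; the poll count and delay are assumptions for illustration only):

	/* Hot-unplug-safe readiness poll: a failed read ends the wait
	 * immediately rather than being misread as a status value.
	 */
	for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0)) {
			rc = -ENODEV;		/* adapter is gone */
			break;
		}
		if (bf_get(lpfc_sliport_status_rdy, &reg_data))
			break;			/* port reports ready */
		msleep(10);			/* assumed inter-poll delay */
	}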
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bf34178b80bf..2b962b020cfb 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1514 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ | 1514 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
1515 | struct lpfc_pde5 *pde5 = NULL; | 1515 | struct lpfc_pde5 *pde5 = NULL; |
1516 | struct lpfc_pde6 *pde6 = NULL; | 1516 | struct lpfc_pde6 *pde6 = NULL; |
1517 | struct ulp_bde64 *prot_bde = NULL; | 1517 | struct lpfc_pde7 *pde7 = NULL; |
1518 | dma_addr_t dataphysaddr, protphysaddr; | 1518 | dma_addr_t dataphysaddr, protphysaddr; |
1519 | unsigned short curr_data = 0, curr_prot = 0; | 1519 | unsigned short curr_data = 0, curr_prot = 0; |
1520 | unsigned int split_offset, protgroup_len; | 1520 | unsigned int split_offset; |
1521 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; | ||
1521 | unsigned int protgrp_blks, protgrp_bytes; | 1522 | unsigned int protgrp_blks, protgrp_bytes; |
1522 | unsigned int remainder, subtotal; | 1523 | unsigned int remainder, subtotal; |
1523 | int status; | 1524 | int status; |
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1585 | bpl++; | 1586 | bpl++; |
1586 | 1587 | ||
1587 | /* setup the first BDE that points to protection buffer */ | 1588 | /* setup the first BDE that points to protection buffer */ |
1588 | prot_bde = (struct ulp_bde64 *) bpl; | 1589 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
1589 | protphysaddr = sg_dma_address(sgpe); | 1590 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
1590 | prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1591 | prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1592 | protgroup_len = sg_dma_len(sgpe); | ||
1593 | 1591 | ||
1594 | /* must be integer multiple of the DIF block length */ | 1592 | /* must be integer multiple of the DIF block length */ |
1595 | BUG_ON(protgroup_len % 8); | 1593 | BUG_ON(protgroup_len % 8); |
1596 | 1594 | ||
1595 | pde7 = (struct lpfc_pde7 *) bpl; | ||
1596 | memset(pde7, 0, sizeof(struct lpfc_pde7)); | ||
1597 | bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); | ||
1598 | |||
1599 | pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1600 | pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1601 | |||
1597 | protgrp_blks = protgroup_len / 8; | 1602 | protgrp_blks = protgroup_len / 8; |
1598 | protgrp_bytes = protgrp_blks * blksize; | 1603 | protgrp_bytes = protgrp_blks * blksize; |
1599 | 1604 | ||
1600 | prot_bde->tus.f.bdeSize = protgroup_len; | 1605 | /* check if this pde is crossing the 4K boundary; if so split */ |
1601 | prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; | 1606 | if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { |
1602 | prot_bde->tus.w = le32_to_cpu(bpl->tus.w); | 1607 | protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); |
1608 | protgroup_offset += protgroup_remainder; | ||
1609 | protgrp_blks = protgroup_remainder / 8; | ||
1610 | protgrp_bytes = protgroup_remainder * blksize; | ||
1611 | } else { | ||
1612 | protgroup_offset = 0; | ||
1613 | curr_prot++; | ||
1614 | } | ||
1603 | 1615 | ||
1604 | curr_prot++; | ||
1605 | num_bde++; | 1616 | num_bde++; |
1606 | 1617 | ||
1607 | /* setup BDE's for data blocks associated with DIF data */ | 1618 | /* setup BDE's for data blocks associated with DIF data */ |
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1653 | 1664 | ||
1654 | } | 1665 | } |
1655 | 1666 | ||
1667 | if (protgroup_offset) { | ||
1668 | /* update the reference tag */ | ||
1669 | reftag += protgrp_blks; | ||
1670 | bpl++; | ||
1671 | continue; | ||
1672 | } | ||
1673 | |||
1656 | /* are we done ? */ | 1674 | /* are we done ? */ |
1657 | if (curr_prot == protcnt) { | 1675 | if (curr_prot == protcnt) { |
1658 | alldone = 1; | 1676 | alldone = 1; |
@@ -1675,6 +1693,7 @@ out: | |||
1675 | 1693 | ||
1676 | return num_bde; | 1694 | return num_bde; |
1677 | } | 1695 | } |
1696 | |||
1678 | /* | 1697 | /* |
1679 | * Given a SCSI command that supports DIF, determine composition of protection | 1698 | * Given a SCSI command that supports DIF, determine composition of protection |
1680 | * groups involved in setting up buffer lists | 1699 | * groups involved in setting up buffer lists |
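Besides switching the protection descriptor from a plain BDE to a PDE7, lpfc_bg_setup_bpl_prot() now splits any protection group whose buffer would cross a 4 KiB boundary: only the bytes up to the boundary go into the current PDE7, protgroup_offset is advanced, and the loop comes around for the remainder via the "if (protgroup_offset) ... continue;" branch. The boundary arithmetic, pulled out as a stand-alone illustration (this helper is not driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* How many bytes of a protection segment fit before the next 4 KiB
	 * boundary; 0 means the segment does not need to be split.
	 */
	static uint32_t prot_split_len(uint64_t dma_addr, uint32_t len)
	{
		uint32_t low = (uint32_t)dma_addr & 0xfff;	/* offset into 4K page */

		if (low + len > 0x1000)
			return 0x1000 - low;
		return 0;
	}

	int main(void)
	{
		/* A 0x200-byte protection group starting 0xF00 into a page:
		 * only 0x100 bytes fit in the first PDE7; the remaining 0x100
		 * are emitted on the next pass with protgroup_offset advanced.
		 */
		printf("%u\n", prot_split_len(0x1000F00ULL, 0x200));	/* prints 256 */
		return 0;
	}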
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2ee0374a9908..4746dcd756dd 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
3477 | int retval = 0; | 3477 | int retval = 0; |
3478 | 3478 | ||
3479 | /* Read the HBA Host Status Register */ | 3479 | /* Read the HBA Host Status Register */ |
3480 | status = readl(phba->HSregaddr); | 3480 | if (lpfc_readl(phba->HSregaddr, &status)) |
3481 | return 1; | ||
3481 | 3482 | ||
3482 | /* | 3483 | /* |
3483 | * Check status register every 100ms for 5 retries, then every | 3484 | * Check status register every 100ms for 5 retries, then every |
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
3502 | lpfc_sli_brdrestart(phba); | 3503 | lpfc_sli_brdrestart(phba); |
3503 | } | 3504 | } |
3504 | /* Read the HBA Host Status Register */ | 3505 | /* Read the HBA Host Status Register */ |
3505 | status = readl(phba->HSregaddr); | 3506 | if (lpfc_readl(phba->HSregaddr, &status)) { |
3507 | retval = 1; | ||
3508 | break; | ||
3509 | } | ||
3506 | } | 3510 | } |
3507 | 3511 | ||
3508 | /* Check to see if any errors occurred during init */ | 3512 | /* Check to see if any errors occurred during init */ |
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3584 | uint32_t __iomem *resp_buf; | 3588 | uint32_t __iomem *resp_buf; |
3585 | uint32_t __iomem *mbox_buf; | 3589 | uint32_t __iomem *mbox_buf; |
3586 | volatile uint32_t mbox; | 3590 | volatile uint32_t mbox; |
3587 | uint32_t hc_copy; | 3591 | uint32_t hc_copy, ha_copy, resp_data; |
3588 | int i; | 3592 | int i; |
3589 | uint8_t hdrtype; | 3593 | uint8_t hdrtype; |
3590 | 3594 | ||
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3601 | resp_buf = phba->MBslimaddr; | 3605 | resp_buf = phba->MBslimaddr; |
3602 | 3606 | ||
3603 | /* Disable the error attention */ | 3607 | /* Disable the error attention */ |
3604 | hc_copy = readl(phba->HCregaddr); | 3608 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) |
3609 | return; | ||
3605 | writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); | 3610 | writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); |
3606 | readl(phba->HCregaddr); /* flush */ | 3611 | readl(phba->HCregaddr); /* flush */ |
3607 | phba->link_flag |= LS_IGNORE_ERATT; | 3612 | phba->link_flag |= LS_IGNORE_ERATT; |
3608 | 3613 | ||
3609 | if (readl(phba->HAregaddr) & HA_ERATT) { | 3614 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3615 | return; | ||
3616 | if (ha_copy & HA_ERATT) { | ||
3610 | /* Clear Chip error bit */ | 3617 | /* Clear Chip error bit */ |
3611 | writel(HA_ERATT, phba->HAregaddr); | 3618 | writel(HA_ERATT, phba->HAregaddr); |
3612 | phba->pport->stopped = 1; | 3619 | phba->pport->stopped = 1; |
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3620 | mbox_buf = phba->MBslimaddr; | 3627 | mbox_buf = phba->MBslimaddr; |
3621 | writel(mbox, mbox_buf); | 3628 | writel(mbox, mbox_buf); |
3622 | 3629 | ||
3623 | for (i = 0; | 3630 | for (i = 0; i < 50; i++) { |
3624 | readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) | 3631 | if (lpfc_readl((resp_buf + 1), &resp_data)) |
3625 | mdelay(1); | 3632 | return; |
3626 | 3633 | if (resp_data != ~(BARRIER_TEST_PATTERN)) | |
3627 | if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { | 3634 | mdelay(1); |
3635 | else | ||
3636 | break; | ||
3637 | } | ||
3638 | resp_data = 0; | ||
3639 | if (lpfc_readl((resp_buf + 1), &resp_data)) | ||
3640 | return; | ||
3641 | if (resp_data != ~(BARRIER_TEST_PATTERN)) { | ||
3628 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || | 3642 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || |
3629 | phba->pport->stopped) | 3643 | phba->pport->stopped) |
3630 | goto restore_hc; | 3644 | goto restore_hc; |
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3633 | } | 3647 | } |
3634 | 3648 | ||
3635 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; | 3649 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; |
3636 | for (i = 0; readl(resp_buf) != mbox && i < 500; i++) | 3650 | resp_data = 0; |
3637 | mdelay(1); | 3651 | for (i = 0; i < 500; i++) { |
3652 | if (lpfc_readl(resp_buf, &resp_data)) | ||
3653 | return; | ||
3654 | if (resp_data != mbox) | ||
3655 | mdelay(1); | ||
3656 | else | ||
3657 | break; | ||
3658 | } | ||
3638 | 3659 | ||
3639 | clear_errat: | 3660 | clear_errat: |
3640 | 3661 | ||
3641 | while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) | 3662 | while (++i < 500) { |
3642 | mdelay(1); | 3663 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3664 | return; | ||
3665 | if (!(ha_copy & HA_ERATT)) | ||
3666 | mdelay(1); | ||
3667 | else | ||
3668 | break; | ||
3669 | } | ||
3643 | 3670 | ||
3644 | if (readl(phba->HAregaddr) & HA_ERATT) { | 3671 | if (readl(phba->HAregaddr) & HA_ERATT) { |
3645 | writel(HA_ERATT, phba->HAregaddr); | 3672 | writel(HA_ERATT, phba->HAregaddr); |
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
3686 | 3713 | ||
3687 | /* Disable the error attention */ | 3714 | /* Disable the error attention */ |
3688 | spin_lock_irq(&phba->hbalock); | 3715 | spin_lock_irq(&phba->hbalock); |
3689 | status = readl(phba->HCregaddr); | 3716 | if (lpfc_readl(phba->HCregaddr, &status)) { |
3717 | spin_unlock_irq(&phba->hbalock); | ||
3718 | mempool_free(pmb, phba->mbox_mem_pool); | ||
3719 | return 1; | ||
3720 | } | ||
3690 | status &= ~HC_ERINT_ENA; | 3721 | status &= ~HC_ERINT_ENA; |
3691 | writel(status, phba->HCregaddr); | 3722 | writel(status, phba->HCregaddr); |
3692 | readl(phba->HCregaddr); /* flush */ | 3723 | readl(phba->HCregaddr); /* flush */ |
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
3720 | * 3 seconds we still set HBA_ERROR state because the status of the | 3751 | * 3 seconds we still set HBA_ERROR state because the status of the |
3721 | * board is now undefined. | 3752 | * board is now undefined. |
3722 | */ | 3753 | */ |
3723 | ha_copy = readl(phba->HAregaddr); | 3754 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3724 | 3755 | return 1; | |
3725 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { | 3756 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { |
3726 | mdelay(100); | 3757 | mdelay(100); |
3727 | ha_copy = readl(phba->HAregaddr); | 3758 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3759 | return 1; | ||
3728 | } | 3760 | } |
3729 | 3761 | ||
3730 | del_timer_sync(&psli->mbox_tmo); | 3762 | del_timer_sync(&psli->mbox_tmo); |
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
4018 | uint32_t status, i = 0; | 4050 | uint32_t status, i = 0; |
4019 | 4051 | ||
4020 | /* Read the HBA Host Status Register */ | 4052 | /* Read the HBA Host Status Register */ |
4021 | status = readl(phba->HSregaddr); | 4053 | if (lpfc_readl(phba->HSregaddr, &status)) |
4054 | return -EIO; | ||
4022 | 4055 | ||
4023 | /* Check status register to see what current state is */ | 4056 | /* Check status register to see what current state is */ |
4024 | i = 0; | 4057 | i = 0; |
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
4073 | lpfc_sli_brdrestart(phba); | 4106 | lpfc_sli_brdrestart(phba); |
4074 | } | 4107 | } |
4075 | /* Read the HBA Host Status Register */ | 4108 | /* Read the HBA Host Status Register */ |
4076 | status = readl(phba->HSregaddr); | 4109 | if (lpfc_readl(phba->HSregaddr, &status)) |
4110 | return -EIO; | ||
4077 | } | 4111 | } |
4078 | 4112 | ||
4079 | /* Check to see if any errors occurred during init */ | 4113 | /* Check to see if any errors occurred during init */ |
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5136 | MAILBOX_t *mb; | 5170 | MAILBOX_t *mb; |
5137 | struct lpfc_sli *psli = &phba->sli; | 5171 | struct lpfc_sli *psli = &phba->sli; |
5138 | uint32_t status, evtctr; | 5172 | uint32_t status, evtctr; |
5139 | uint32_t ha_copy; | 5173 | uint32_t ha_copy, hc_copy; |
5140 | int i; | 5174 | int i; |
5141 | unsigned long timeout; | 5175 | unsigned long timeout; |
5142 | unsigned long drvr_flag = 0; | 5176 | unsigned long drvr_flag = 0; |
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5202 | goto out_not_finished; | 5236 | goto out_not_finished; |
5203 | } | 5237 | } |
5204 | 5238 | ||
5205 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 5239 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { |
5206 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 5240 | if (lpfc_readl(phba->HCregaddr, &hc_copy) || |
5207 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 5241 | !(hc_copy & HC_MBINT_ENA)) { |
5208 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 5242 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
5243 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
5209 | "(%d):2528 Mailbox command x%x cannot " | 5244 | "(%d):2528 Mailbox command x%x cannot " |
5210 | "issue Data: x%x x%x\n", | 5245 | "issue Data: x%x x%x\n", |
5211 | pmbox->vport ? pmbox->vport->vpi : 0, | 5246 | pmbox->vport ? pmbox->vport->vpi : 0, |
5212 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); | 5247 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); |
5213 | goto out_not_finished; | 5248 | goto out_not_finished; |
5249 | } | ||
5214 | } | 5250 | } |
5215 | 5251 | ||
5216 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { | 5252 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { |
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5408 | word0 = le32_to_cpu(word0); | 5444 | word0 = le32_to_cpu(word0); |
5409 | } else { | 5445 | } else { |
5410 | /* First read mbox status word */ | 5446 | /* First read mbox status word */ |
5411 | word0 = readl(phba->MBslimaddr); | 5447 | if (lpfc_readl(phba->MBslimaddr, &word0)) { |
5448 | spin_unlock_irqrestore(&phba->hbalock, | ||
5449 | drvr_flag); | ||
5450 | goto out_not_finished; | ||
5451 | } | ||
5412 | } | 5452 | } |
5413 | 5453 | ||
5414 | /* Read the HBA Host Attention Register */ | 5454 | /* Read the HBA Host Attention Register */ |
5415 | ha_copy = readl(phba->HAregaddr); | 5455 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) { |
5456 | spin_unlock_irqrestore(&phba->hbalock, | ||
5457 | drvr_flag); | ||
5458 | goto out_not_finished; | ||
5459 | } | ||
5416 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, | 5460 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, |
5417 | mb->mbxCommand) * | 5461 | mb->mbxCommand) * |
5418 | 1000) + jiffies; | 5462 | 1000) + jiffies; |
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5463 | word0 = readl(phba->MBslimaddr); | 5507 | word0 = readl(phba->MBslimaddr); |
5464 | } | 5508 | } |
5465 | /* Read the HBA Host Attention Register */ | 5509 | /* Read the HBA Host Attention Register */ |
5466 | ha_copy = readl(phba->HAregaddr); | 5510 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) { |
5511 | spin_unlock_irqrestore(&phba->hbalock, | ||
5512 | drvr_flag); | ||
5513 | goto out_not_finished; | ||
5514 | } | ||
5467 | } | 5515 | } |
5468 | 5516 | ||
5469 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { | 5517 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
6263 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 6311 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
6264 | else | 6312 | else |
6265 | bf_set(lpfc_sli4_sge_last, sgl, 0); | 6313 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
6266 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
6267 | /* swap the size field back to the cpu so we | 6314 | /* swap the size field back to the cpu so we |
6268 | * can assign it to the sgl. | 6315 | * can assign it to the sgl. |
6269 | */ | 6316 | */ |
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
6283 | bf_set(lpfc_sli4_sge_offset, sgl, offset); | 6330 | bf_set(lpfc_sli4_sge_offset, sgl, offset); |
6284 | offset += bde.tus.f.bdeSize; | 6331 | offset += bde.tus.f.bdeSize; |
6285 | } | 6332 | } |
6333 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
6286 | bpl++; | 6334 | bpl++; |
6287 | sgl++; | 6335 | sgl++; |
6288 | } | 6336 | } |
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6528 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | 6576 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / |
6529 | sizeof(struct ulp_bde64); | 6577 | sizeof(struct ulp_bde64); |
6530 | for (i = 0; i < numBdes; i++) { | 6578 | for (i = 0; i < numBdes; i++) { |
6531 | if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6532 | break; | ||
6533 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); | 6579 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); |
6580 | if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6581 | break; | ||
6534 | xmit_len += bde.tus.f.bdeSize; | 6582 | xmit_len += bde.tus.f.bdeSize; |
6535 | } | 6583 | } |
6536 | /* word3 iocb=IO_TAG wqe=request_payload_len */ | 6584 | /* word3 iocb=IO_TAG wqe=request_payload_len */ |
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6620 | xritag = 0; | 6668 | xritag = 0; |
6621 | break; | 6669 | break; |
6622 | case CMD_XMIT_BLS_RSP64_CX: | 6670 | case CMD_XMIT_BLS_RSP64_CX: |
6623 | /* As BLS ABTS-ACC WQE is very different from other WQEs, | 6671 | /* As BLS ABTS RSP WQE is very different from other WQEs, |
6624 | * we re-construct this WQE here based on information in | 6672 | * we re-construct this WQE here based on information in |
6625 | * iocbq from scratch. | 6673 | * iocbq from scratch. |
6626 | */ | 6674 | */ |
6627 | memset(wqe, 0, sizeof(union lpfc_wqe)); | 6675 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
6628 | /* OX_ID is invariable to who sent ABTS to CT exchange */ | 6676 | /* OX_ID is invariable to who sent ABTS to CT exchange */ |
6629 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, | 6677 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, |
6630 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); | 6678 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); |
6631 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == | 6679 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == |
6632 | LPFC_ABTS_UNSOL_INT) { | 6680 | LPFC_ABTS_UNSOL_INT) { |
6633 | /* ABTS sent by initiator to CT exchange, the | 6681 | /* ABTS sent by initiator to CT exchange, the |
6634 | * RX_ID field will be filled with the newly | 6682 | * RX_ID field will be filled with the newly |
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6642 | * RX_ID from ABTS. | 6690 | * RX_ID from ABTS. |
6643 | */ | 6691 | */ |
6644 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, | 6692 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, |
6645 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); | 6693 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); |
6646 | } | 6694 | } |
6647 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); | 6695 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); |
6648 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); | 6696 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); |
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6653 | LPFC_WQE_LENLOC_NONE); | 6701 | LPFC_WQE_LENLOC_NONE); |
6654 | /* Overwrite the pre-set comnd type with OTHER_COMMAND */ | 6702 | /* Overwrite the pre-set comnd type with OTHER_COMMAND */ |
6655 | command_type = OTHER_COMMAND; | 6703 | command_type = OTHER_COMMAND; |
6704 | if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { | ||
6705 | bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, | ||
6706 | bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); | ||
6707 | bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, | ||
6708 | bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); | ||
6709 | bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, | ||
6710 | bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); | ||
6711 | } | ||
6712 | |||
6656 | break; | 6713 | break; |
6657 | case CMD_XRI_ABORTED_CX: | 6714 | case CMD_XRI_ABORTED_CX: |
6658 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ | 6715 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ |
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
6701 | 6758 | ||
6702 | if (piocb->sli4_xritag == NO_XRI) { | 6759 | if (piocb->sli4_xritag == NO_XRI) { |
6703 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || | 6760 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
6704 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) | 6761 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || |
6762 | piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) | ||
6705 | sglq = NULL; | 6763 | sglq = NULL; |
6706 | else { | 6764 | else { |
6707 | if (pring->txq_cnt) { | 6765 | if (pring->txq_cnt) { |
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
8194 | piocb->iocb_flag &= ~LPFC_IO_WAKE; | 8252 | piocb->iocb_flag &= ~LPFC_IO_WAKE; |
8195 | 8253 | ||
8196 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 8254 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
8197 | creg_val = readl(phba->HCregaddr); | 8255 | if (lpfc_readl(phba->HCregaddr, &creg_val)) |
8256 | return IOCB_ERROR; | ||
8198 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 8257 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
8199 | writel(creg_val, phba->HCregaddr); | 8258 | writel(creg_val, phba->HCregaddr); |
8200 | readl(phba->HCregaddr); /* flush */ | 8259 | readl(phba->HCregaddr); /* flush */ |
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
8236 | } | 8295 | } |
8237 | 8296 | ||
8238 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 8297 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
8239 | creg_val = readl(phba->HCregaddr); | 8298 | if (lpfc_readl(phba->HCregaddr, &creg_val)) |
8299 | return IOCB_ERROR; | ||
8240 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 8300 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
8241 | writel(creg_val, phba->HCregaddr); | 8301 | writel(creg_val, phba->HCregaddr); |
8242 | readl(phba->HCregaddr); /* flush */ | 8302 | readl(phba->HCregaddr); /* flush */ |
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
8387 | uint32_t ha_copy; | 8447 | uint32_t ha_copy; |
8388 | 8448 | ||
8389 | /* Read chip Host Attention (HA) register */ | 8449 | /* Read chip Host Attention (HA) register */ |
8390 | ha_copy = readl(phba->HAregaddr); | 8450 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
8451 | goto unplug_err; | ||
8452 | |||
8391 | if (ha_copy & HA_ERATT) { | 8453 | if (ha_copy & HA_ERATT) { |
8392 | /* Read host status register to retrieve error event */ | 8454 | /* Read host status register to retrieve error event */ |
8393 | lpfc_sli_read_hs(phba); | 8455 | if (lpfc_sli_read_hs(phba)) |
8456 | goto unplug_err; | ||
8394 | 8457 | ||
8395 | /* Check if there is a deferred error condition is active */ | 8458 | /* Check if there is a deferred error condition is active */ |
8396 | if ((HS_FFER1 & phba->work_hs) && | 8459 | if ((HS_FFER1 & phba->work_hs) && |
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
8409 | return 1; | 8472 | return 1; |
8410 | } | 8473 | } |
8411 | return 0; | 8474 | return 0; |
8475 | |||
8476 | unplug_err: | ||
8477 | /* Set the driver HS work bitmap */ | ||
8478 | phba->work_hs |= UNPLUG_ERR; | ||
8479 | /* Set the driver HA work bitmap */ | ||
8480 | phba->work_ha |= HA_ERATT; | ||
8481 | /* Indicate polling handles this ERATT */ | ||
8482 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8483 | return 1; | ||
8412 | } | 8484 | } |
8413 | 8485 | ||
8414 | /** | 8486 | /** |
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
8436 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); | 8508 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
8437 | switch (if_type) { | 8509 | switch (if_type) { |
8438 | case LPFC_SLI_INTF_IF_TYPE_0: | 8510 | case LPFC_SLI_INTF_IF_TYPE_0: |
8439 | uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); | 8511 | if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, |
8440 | uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); | 8512 | &uerr_sta_lo) || |
8513 | lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, | ||
8514 | &uerr_sta_hi)) { | ||
8515 | phba->work_hs |= UNPLUG_ERR; | ||
8516 | phba->work_ha |= HA_ERATT; | ||
8517 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8518 | return 1; | ||
8519 | } | ||
8441 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || | 8520 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || |
8442 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { | 8521 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { |
8443 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8522 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
8456 | } | 8535 | } |
8457 | break; | 8536 | break; |
8458 | case LPFC_SLI_INTF_IF_TYPE_2: | 8537 | case LPFC_SLI_INTF_IF_TYPE_2: |
8459 | portstat_reg.word0 = | 8538 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
8460 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | 8539 | &portstat_reg.word0) || |
8461 | portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); | 8540 | lpfc_readl(phba->sli4_hba.PSMPHRregaddr, |
8541 | &portsmphr)){ | ||
8542 | phba->work_hs |= UNPLUG_ERR; | ||
8543 | phba->work_ha |= HA_ERATT; | ||
8544 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8545 | return 1; | ||
8546 | } | ||
8462 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { | 8547 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { |
8463 | phba->work_status[0] = | 8548 | phba->work_status[0] = |
8464 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); | 8549 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); |
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8639 | return IRQ_NONE; | 8724 | return IRQ_NONE; |
8640 | /* Need to read HA REG for slow-path events */ | 8725 | /* Need to read HA REG for slow-path events */ |
8641 | spin_lock_irqsave(&phba->hbalock, iflag); | 8726 | spin_lock_irqsave(&phba->hbalock, iflag); |
8642 | ha_copy = readl(phba->HAregaddr); | 8727 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
8728 | goto unplug_error; | ||
8643 | /* If somebody is waiting to handle an eratt don't process it | 8729 | /* If somebody is waiting to handle an eratt don't process it |
8644 | * here. The brdkill function will do this. | 8730 | * here. The brdkill function will do this. |
8645 | */ | 8731 | */ |
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8665 | } | 8751 | } |
8666 | 8752 | ||
8667 | /* Clear up only attention source related to slow-path */ | 8753 | /* Clear up only attention source related to slow-path */ |
8668 | hc_copy = readl(phba->HCregaddr); | 8754 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) |
8755 | goto unplug_error; | ||
8756 | |||
8669 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | | 8757 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | |
8670 | HC_LAINT_ENA | HC_ERINT_ENA), | 8758 | HC_LAINT_ENA | HC_ERINT_ENA), |
8671 | phba->HCregaddr); | 8759 | phba->HCregaddr); |
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8688 | */ | 8776 | */ |
8689 | spin_lock_irqsave(&phba->hbalock, iflag); | 8777 | spin_lock_irqsave(&phba->hbalock, iflag); |
8690 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; | 8778 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; |
8691 | control = readl(phba->HCregaddr); | 8779 | if (lpfc_readl(phba->HCregaddr, &control)) |
8780 | goto unplug_error; | ||
8692 | control &= ~HC_LAINT_ENA; | 8781 | control &= ~HC_LAINT_ENA; |
8693 | writel(control, phba->HCregaddr); | 8782 | writel(control, phba->HCregaddr); |
8694 | readl(phba->HCregaddr); /* flush */ | 8783 | readl(phba->HCregaddr); /* flush */ |
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8708 | status >>= (4*LPFC_ELS_RING); | 8797 | status >>= (4*LPFC_ELS_RING); |
8709 | if (status & HA_RXMASK) { | 8798 | if (status & HA_RXMASK) { |
8710 | spin_lock_irqsave(&phba->hbalock, iflag); | 8799 | spin_lock_irqsave(&phba->hbalock, iflag); |
8711 | control = readl(phba->HCregaddr); | 8800 | if (lpfc_readl(phba->HCregaddr, &control)) |
8801 | goto unplug_error; | ||
8712 | 8802 | ||
8713 | lpfc_debugfs_slow_ring_trc(phba, | 8803 | lpfc_debugfs_slow_ring_trc(phba, |
8714 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", | 8804 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", |
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8741 | } | 8831 | } |
8742 | spin_lock_irqsave(&phba->hbalock, iflag); | 8832 | spin_lock_irqsave(&phba->hbalock, iflag); |
8743 | if (work_ha_copy & HA_ERATT) { | 8833 | if (work_ha_copy & HA_ERATT) { |
8744 | lpfc_sli_read_hs(phba); | 8834 | if (lpfc_sli_read_hs(phba)) |
8835 | goto unplug_error; | ||
8745 | /* | 8836 | /* |
8746 | * Check if there is a deferred error condition | 8837 | * Check if there is a deferred error condition |
8747 | * is active | 8838 | * is active |
@@ -8872,6 +8963,9 @@ send_current_mbox: | |||
8872 | lpfc_worker_wake_up(phba); | 8963 | lpfc_worker_wake_up(phba); |
8873 | } | 8964 | } |
8874 | return IRQ_HANDLED; | 8965 | return IRQ_HANDLED; |
8966 | unplug_error: | ||
8967 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
8968 | return IRQ_HANDLED; | ||
8875 | 8969 | ||
8876 | } /* lpfc_sli_sp_intr_handler */ | 8970 | } /* lpfc_sli_sp_intr_handler */ |
8877 | 8971 | ||
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) | |||
8919 | if (lpfc_intr_state_check(phba)) | 9013 | if (lpfc_intr_state_check(phba)) |
8920 | return IRQ_NONE; | 9014 | return IRQ_NONE; |
8921 | /* Need to read HA REG for FCP ring and other ring events */ | 9015 | /* Need to read HA REG for FCP ring and other ring events */ |
8922 | ha_copy = readl(phba->HAregaddr); | 9016 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
9017 | return IRQ_HANDLED; | ||
8923 | /* Clear up only attention source related to fast-path */ | 9018 | /* Clear up only attention source related to fast-path */ |
8924 | spin_lock_irqsave(&phba->hbalock, iflag); | 9019 | spin_lock_irqsave(&phba->hbalock, iflag); |
8925 | /* | 9020 | /* |
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
9004 | return IRQ_NONE; | 9099 | return IRQ_NONE; |
9005 | 9100 | ||
9006 | spin_lock(&phba->hbalock); | 9101 | spin_lock(&phba->hbalock); |
9007 | phba->ha_copy = readl(phba->HAregaddr); | 9102 | if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { |
9103 | spin_unlock(&phba->hbalock); | ||
9104 | return IRQ_HANDLED; | ||
9105 | } | ||
9106 | |||
9008 | if (unlikely(!phba->ha_copy)) { | 9107 | if (unlikely(!phba->ha_copy)) { |
9009 | spin_unlock(&phba->hbalock); | 9108 | spin_unlock(&phba->hbalock); |
9010 | return IRQ_NONE; | 9109 | return IRQ_NONE; |
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
9026 | } | 9125 | } |
9027 | 9126 | ||
9028 | /* Clear attention sources except link and error attentions */ | 9127 | /* Clear attention sources except link and error attentions */ |
9029 | hc_copy = readl(phba->HCregaddr); | 9128 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) { |
9129 | spin_unlock(&phba->hbalock); | ||
9130 | return IRQ_HANDLED; | ||
9131 | } | ||
9030 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | 9132 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA |
9031 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), | 9133 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), |
9032 | phba->HCregaddr); | 9134 | phba->HCregaddr); |
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10403 | if (!phba->sli4_hba.pc_sli4_params.supported) | 10505 | if (!phba->sli4_hba.pc_sli4_params.supported) |
10404 | hw_page_size = SLI4_PAGE_SIZE; | 10506 | hw_page_size = SLI4_PAGE_SIZE; |
10405 | 10507 | ||
10406 | |||
10407 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 10508 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10408 | if (!mbox) | 10509 | if (!mbox) |
10409 | return -ENOMEM; | 10510 | return -ENOMEM; |
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10413 | LPFC_MBOX_OPCODE_CQ_CREATE, | 10514 | LPFC_MBOX_OPCODE_CQ_CREATE, |
10414 | length, LPFC_SLI4_MBX_EMBED); | 10515 | length, LPFC_SLI4_MBX_EMBED); |
10415 | cq_create = &mbox->u.mqe.un.cq_create; | 10516 | cq_create = &mbox->u.mqe.un.cq_create; |
10517 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
10416 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, | 10518 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, |
10417 | cq->page_count); | 10519 | cq->page_count); |
10418 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); | 10520 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); |
10419 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); | 10521 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); |
10420 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); | 10522 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10523 | phba->sli4_hba.pc_sli4_params.cqv); | ||
10524 | if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { | ||
10525 | bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, | ||
10526 | (PAGE_SIZE/SLI4_PAGE_SIZE)); | ||
10527 | bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, | ||
10528 | eq->queue_id); | ||
10529 | } else { | ||
10530 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, | ||
10531 | eq->queue_id); | ||
10532 | } | ||
10421 | switch (cq->entry_count) { | 10533 | switch (cq->entry_count) { |
10422 | default: | 10534 | default: |
10423 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10535 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10449 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10561 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10450 | 10562 | ||
10451 | /* The IOCTL status is embedded in the mailbox subheader. */ | 10563 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10452 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
10453 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 10564 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10454 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 10565 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10455 | if (shdr_status || shdr_add_status || rc) { | 10566 | if (shdr_status || shdr_add_status || rc) { |
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10515 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); | 10626 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); |
10516 | switch (mq->entry_count) { | 10627 | switch (mq->entry_count) { |
10517 | case 16: | 10628 | case 16: |
10518 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10629 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10519 | LPFC_MQ_CNT_16); | 10630 | LPFC_MQ_RING_SIZE_16); |
10520 | break; | 10631 | break; |
10521 | case 32: | 10632 | case 32: |
10522 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10633 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10523 | LPFC_MQ_CNT_32); | 10634 | LPFC_MQ_RING_SIZE_32); |
10524 | break; | 10635 | break; |
10525 | case 64: | 10636 | case 64: |
10526 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10637 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10527 | LPFC_MQ_CNT_64); | 10638 | LPFC_MQ_RING_SIZE_64); |
10528 | break; | 10639 | break; |
10529 | case 128: | 10640 | case 128: |
10530 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10641 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10531 | LPFC_MQ_CNT_128); | 10642 | LPFC_MQ_RING_SIZE_128); |
10532 | break; | 10643 | break; |
10533 | } | 10644 | } |
10534 | list_for_each_entry(dmabuf, &mq->page_list, list) { | 10645 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10586 | length, LPFC_SLI4_MBX_EMBED); | 10697 | length, LPFC_SLI4_MBX_EMBED); |
10587 | 10698 | ||
10588 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; | 10699 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; |
10700 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; | ||
10589 | bf_set(lpfc_mbx_mq_create_ext_num_pages, | 10701 | bf_set(lpfc_mbx_mq_create_ext_num_pages, |
10590 | &mq_create_ext->u.request, mq->page_count); | 10702 | &mq_create_ext->u.request, mq->page_count); |
10591 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, | 10703 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, |
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10598 | &mq_create_ext->u.request, 1); | 10710 | &mq_create_ext->u.request, 1); |
10599 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, | 10711 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, |
10600 | &mq_create_ext->u.request, 1); | 10712 | &mq_create_ext->u.request, 1); |
10601 | bf_set(lpfc_mq_context_cq_id, | ||
10602 | &mq_create_ext->u.request.context, cq->queue_id); | ||
10603 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); | 10713 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); |
10714 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
10715 | phba->sli4_hba.pc_sli4_params.mqv); | ||
10716 | if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) | ||
10717 | bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, | ||
10718 | cq->queue_id); | ||
10719 | else | ||
10720 | bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, | ||
10721 | cq->queue_id); | ||
10604 | switch (mq->entry_count) { | 10722 | switch (mq->entry_count) { |
10605 | default: | 10723 | default: |
10606 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10724 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10610 | return -EINVAL; | 10728 | return -EINVAL; |
10611 | /* otherwise default to smallest count (drop through) */ | 10729 | /* otherwise default to smallest count (drop through) */ |
10612 | case 16: | 10730 | case 16: |
10613 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10731 | bf_set(lpfc_mq_context_ring_size, |
10614 | LPFC_MQ_CNT_16); | 10732 | &mq_create_ext->u.request.context, |
10733 | LPFC_MQ_RING_SIZE_16); | ||
10615 | break; | 10734 | break; |
10616 | case 32: | 10735 | case 32: |
10617 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10736 | bf_set(lpfc_mq_context_ring_size, |
10618 | LPFC_MQ_CNT_32); | 10737 | &mq_create_ext->u.request.context, |
10738 | LPFC_MQ_RING_SIZE_32); | ||
10619 | break; | 10739 | break; |
10620 | case 64: | 10740 | case 64: |
10621 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10741 | bf_set(lpfc_mq_context_ring_size, |
10622 | LPFC_MQ_CNT_64); | 10742 | &mq_create_ext->u.request.context, |
10743 | LPFC_MQ_RING_SIZE_64); | ||
10623 | break; | 10744 | break; |
10624 | case 128: | 10745 | case 128: |
10625 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10746 | bf_set(lpfc_mq_context_ring_size, |
10626 | LPFC_MQ_CNT_128); | 10747 | &mq_create_ext->u.request.context, |
10748 | LPFC_MQ_RING_SIZE_128); | ||
10627 | break; | 10749 | break; |
10628 | } | 10750 | } |
10629 | list_for_each_entry(dmabuf, &mq->page_list, list) { | 10751 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10634 | putPaddrHigh(dmabuf->phys); | 10756 | putPaddrHigh(dmabuf->phys); |
10635 | } | 10757 | } |
10636 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10758 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10637 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; | ||
10638 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, | 10759 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, |
10639 | &mq_create_ext->u.response); | 10760 | &mq_create_ext->u.response); |
10640 | if (rc != MBX_SUCCESS) { | 10761 | if (rc != MBX_SUCCESS) { |
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
10711 | uint32_t shdr_status, shdr_add_status; | 10832 | uint32_t shdr_status, shdr_add_status; |
10712 | union lpfc_sli4_cfg_shdr *shdr; | 10833 | union lpfc_sli4_cfg_shdr *shdr; |
10713 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; | 10834 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
10835 | struct dma_address *page; | ||
10714 | 10836 | ||
10715 | if (!phba->sli4_hba.pc_sli4_params.supported) | 10837 | if (!phba->sli4_hba.pc_sli4_params.supported) |
10716 | hw_page_size = SLI4_PAGE_SIZE; | 10838 | hw_page_size = SLI4_PAGE_SIZE; |
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
10724 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, | 10846 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, |
10725 | length, LPFC_SLI4_MBX_EMBED); | 10847 | length, LPFC_SLI4_MBX_EMBED); |
10726 | wq_create = &mbox->u.mqe.un.wq_create; | 10848 | wq_create = &mbox->u.mqe.un.wq_create; |
10849 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
10727 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, | 10850 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, |
10728 | wq->page_count); | 10851 | wq->page_count); |
10729 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, | 10852 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, |
10730 | cq->queue_id); | 10853 | cq->queue_id); |
10854 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
10855 | phba->sli4_hba.pc_sli4_params.wqv); | ||
10856 | if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { | ||
10857 | bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, | ||
10858 | wq->entry_count); | ||
10859 | switch (wq->entry_size) { | ||
10860 | default: | ||
10861 | case 64: | ||
10862 | bf_set(lpfc_mbx_wq_create_wqe_size, | ||
10863 | &wq_create->u.request_1, | ||
10864 | LPFC_WQ_WQE_SIZE_64); | ||
10865 | break; | ||
10866 | case 128: | ||
10867 | bf_set(lpfc_mbx_wq_create_wqe_size, | ||
10868 | &wq_create->u.request_1, | ||
10869 | LPFC_WQ_WQE_SIZE_128); | ||
10870 | break; | ||
10871 | } | ||
10872 | bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, | ||
10873 | (PAGE_SIZE/SLI4_PAGE_SIZE)); | ||
10874 | page = wq_create->u.request_1.page; | ||
10875 | } else { | ||
10876 | page = wq_create->u.request.page; | ||
10877 | } | ||
10731 | list_for_each_entry(dmabuf, &wq->page_list, list) { | 10878 | list_for_each_entry(dmabuf, &wq->page_list, list) { |
10732 | memset(dmabuf->virt, 0, hw_page_size); | 10879 | memset(dmabuf->virt, 0, hw_page_size); |
10733 | wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 10880 | page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); |
10734 | putPaddrLow(dmabuf->phys); | 10881 | page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); |
10735 | wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
10736 | putPaddrHigh(dmabuf->phys); | ||
10737 | } | 10882 | } |
10738 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10883 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10739 | /* The IOCTL status is embedded in the mailbox subheader. */ | 10884 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10740 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
10741 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 10885 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10742 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 10886 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10743 | if (shdr_status || shdr_add_status || rc) { | 10887 | if (shdr_status || shdr_add_status || rc) { |
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10815 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | 10959 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
10816 | length, LPFC_SLI4_MBX_EMBED); | 10960 | length, LPFC_SLI4_MBX_EMBED); |
10817 | rq_create = &mbox->u.mqe.un.rq_create; | 10961 | rq_create = &mbox->u.mqe.un.rq_create; |
10818 | switch (hrq->entry_count) { | 10962 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; |
10819 | default: | 10963 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10820 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10964 | phba->sli4_hba.pc_sli4_params.rqv); |
10821 | "2535 Unsupported RQ count. (%d)\n", | 10965 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
10822 | hrq->entry_count); | 10966 | bf_set(lpfc_rq_context_rqe_count_1, |
10823 | if (hrq->entry_count < 512) | 10967 | &rq_create->u.request.context, |
10824 | return -EINVAL; | 10968 | hrq->entry_count); |
10825 | /* otherwise default to smallest count (drop through) */ | 10969 | rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; |
10826 | case 512: | 10970 | } else { |
10827 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10971 | switch (hrq->entry_count) { |
10828 | LPFC_RQ_RING_SIZE_512); | 10972 | default: |
10829 | break; | 10973 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
10830 | case 1024: | 10974 | "2535 Unsupported RQ count. (%d)\n", |
10831 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10975 | hrq->entry_count); |
10832 | LPFC_RQ_RING_SIZE_1024); | 10976 | if (hrq->entry_count < 512) |
10833 | break; | 10977 | return -EINVAL; |
10834 | case 2048: | 10978 | /* otherwise default to smallest count (drop through) */ |
10835 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10979 | case 512: |
10836 | LPFC_RQ_RING_SIZE_2048); | 10980 | bf_set(lpfc_rq_context_rqe_count, |
10837 | break; | 10981 | &rq_create->u.request.context, |
10838 | case 4096: | 10982 | LPFC_RQ_RING_SIZE_512); |
10839 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10983 | break; |
10840 | LPFC_RQ_RING_SIZE_4096); | 10984 | case 1024: |
10841 | break; | 10985 | bf_set(lpfc_rq_context_rqe_count, |
10986 | &rq_create->u.request.context, | ||
10987 | LPFC_RQ_RING_SIZE_1024); | ||
10988 | break; | ||
10989 | case 2048: | ||
10990 | bf_set(lpfc_rq_context_rqe_count, | ||
10991 | &rq_create->u.request.context, | ||
10992 | LPFC_RQ_RING_SIZE_2048); | ||
10993 | break; | ||
10994 | case 4096: | ||
10995 | bf_set(lpfc_rq_context_rqe_count, | ||
10996 | &rq_create->u.request.context, | ||
10997 | LPFC_RQ_RING_SIZE_4096); | ||
10998 | break; | ||
10999 | } | ||
11000 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
11001 | LPFC_HDR_BUF_SIZE); | ||
10842 | } | 11002 | } |
10843 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 11003 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
10844 | cq->queue_id); | 11004 | cq->queue_id); |
10845 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | 11005 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
10846 | hrq->page_count); | 11006 | hrq->page_count); |
10847 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
10848 | LPFC_HDR_BUF_SIZE); | ||
10849 | list_for_each_entry(dmabuf, &hrq->page_list, list) { | 11007 | list_for_each_entry(dmabuf, &hrq->page_list, list) { |
10850 | memset(dmabuf->virt, 0, hw_page_size); | 11008 | memset(dmabuf->virt, 0, hw_page_size); |
10851 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 11009 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10855 | } | 11013 | } |
10856 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 11014 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10857 | /* The IOCTL status is embedded in the mailbox subheader. */ | 11015 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10858 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; | ||
10859 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 11016 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10860 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 11017 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10861 | if (shdr_status || shdr_add_status || rc) { | 11018 | if (shdr_status || shdr_add_status || rc) { |
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10881 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | 11038 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
10882 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | 11039 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
10883 | length, LPFC_SLI4_MBX_EMBED); | 11040 | length, LPFC_SLI4_MBX_EMBED); |
10884 | switch (drq->entry_count) { | 11041 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10885 | default: | 11042 | phba->sli4_hba.pc_sli4_params.rqv); |
10886 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 11043 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
10887 | "2536 Unsupported RQ count. (%d)\n", | 11044 | bf_set(lpfc_rq_context_rqe_count_1, |
10888 | drq->entry_count); | 11045 | &rq_create->u.request.context, |
10889 | if (drq->entry_count < 512) | 11046 | hrq->entry_count); |
10890 | return -EINVAL; | 11047 | rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; |
10891 | /* otherwise default to smallest count (drop through) */ | 11048 | } else { |
10892 | case 512: | 11049 | switch (drq->entry_count) { |
10893 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11050 | default: |
10894 | LPFC_RQ_RING_SIZE_512); | 11051 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
10895 | break; | 11052 | "2536 Unsupported RQ count. (%d)\n", |
10896 | case 1024: | 11053 | drq->entry_count); |
10897 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11054 | if (drq->entry_count < 512) |
10898 | LPFC_RQ_RING_SIZE_1024); | 11055 | return -EINVAL; |
10899 | break; | 11056 | /* otherwise default to smallest count (drop through) */ |
10900 | case 2048: | 11057 | case 512: |
10901 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11058 | bf_set(lpfc_rq_context_rqe_count, |
10902 | LPFC_RQ_RING_SIZE_2048); | 11059 | &rq_create->u.request.context, |
10903 | break; | 11060 | LPFC_RQ_RING_SIZE_512); |
10904 | case 4096: | 11061 | break; |
10905 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11062 | case 1024: |
10906 | LPFC_RQ_RING_SIZE_4096); | 11063 | bf_set(lpfc_rq_context_rqe_count, |
10907 | break; | 11064 | &rq_create->u.request.context, |
11065 | LPFC_RQ_RING_SIZE_1024); | ||
11066 | break; | ||
11067 | case 2048: | ||
11068 | bf_set(lpfc_rq_context_rqe_count, | ||
11069 | &rq_create->u.request.context, | ||
11070 | LPFC_RQ_RING_SIZE_2048); | ||
11071 | break; | ||
11072 | case 4096: | ||
11073 | bf_set(lpfc_rq_context_rqe_count, | ||
11074 | &rq_create->u.request.context, | ||
11075 | LPFC_RQ_RING_SIZE_4096); | ||
11076 | break; | ||
11077 | } | ||
11078 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
11079 | LPFC_DATA_BUF_SIZE); | ||
10908 | } | 11080 | } |
10909 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 11081 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
10910 | cq->queue_id); | 11082 | cq->queue_id); |
10911 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | 11083 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
10912 | drq->page_count); | 11084 | drq->page_count); |
10913 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
10914 | LPFC_DATA_BUF_SIZE); | ||
10915 | list_for_each_entry(dmabuf, &drq->page_list, list) { | 11085 | list_for_each_entry(dmabuf, &drq->page_list, list) { |
10916 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 11086 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
10917 | putPaddrLow(dmabuf->phys); | 11087 | putPaddrLow(dmabuf->phys); |
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
11580 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; | 11750 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; |
11581 | char *type_names[] = FC_TYPE_NAMES_INIT; | 11751 | char *type_names[] = FC_TYPE_NAMES_INIT; |
11582 | struct fc_vft_header *fc_vft_hdr; | 11752 | struct fc_vft_header *fc_vft_hdr; |
11753 | uint32_t *header = (uint32_t *) fc_hdr; | ||
11583 | 11754 | ||
11584 | switch (fc_hdr->fh_r_ctl) { | 11755 | switch (fc_hdr->fh_r_ctl) { |
11585 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ | 11756 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ |
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
11628 | default: | 11799 | default: |
11629 | goto drop; | 11800 | goto drop; |
11630 | } | 11801 | } |
11802 | |||
11631 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 11803 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
11632 | "2538 Received frame rctl:%s type:%s\n", | 11804 | "2538 Received frame rctl:%s type:%s " |
11805 | "Frame Data:%08x %08x %08x %08x %08x %08x\n", | ||
11633 | rctl_names[fc_hdr->fh_r_ctl], | 11806 | rctl_names[fc_hdr->fh_r_ctl], |
11634 | type_names[fc_hdr->fh_type]); | 11807 | type_names[fc_hdr->fh_type], |
11808 | be32_to_cpu(header[0]), be32_to_cpu(header[1]), | ||
11809 | be32_to_cpu(header[2]), be32_to_cpu(header[3]), | ||
11810 | be32_to_cpu(header[4]), be32_to_cpu(header[5])); | ||
11635 | return 0; | 11811 | return 0; |
11636 | drop: | 11812 | drop: |
11637 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | 11813 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, |
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, | |||
11928 | } | 12104 | } |
11929 | 12105 | ||
11930 | /** | 12106 | /** |
11931 | * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler | 12107 | * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler |
11932 | * @phba: Pointer to HBA context object. | 12108 | * @phba: Pointer to HBA context object. |
11933 | * @cmd_iocbq: pointer to the command iocbq structure. | 12109 | * @cmd_iocbq: pointer to the command iocbq structure. |
11934 | * @rsp_iocbq: pointer to the response iocbq structure. | 12110 | * @rsp_iocbq: pointer to the response iocbq structure. |
11935 | * | 12111 | * |
11936 | * This function handles the sequence abort accept iocb command complete | 12112 | * This function handles the sequence abort response iocb command complete |
11937 | * event. It properly releases the memory allocated to the sequence abort | 12113 | * event. It properly releases the memory allocated to the sequence abort |
11938 | * accept iocb. | 12114 | * accept iocb. |
11939 | **/ | 12115 | **/ |
11940 | static void | 12116 | static void |
11941 | lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | 12117 | lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, |
11942 | struct lpfc_iocbq *cmd_iocbq, | 12118 | struct lpfc_iocbq *cmd_iocbq, |
11943 | struct lpfc_iocbq *rsp_iocbq) | 12119 | struct lpfc_iocbq *rsp_iocbq) |
11944 | { | 12120 | { |
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | |||
11947 | } | 12123 | } |
11948 | 12124 | ||
11949 | /** | 12125 | /** |
11950 | * lpfc_sli4_seq_abort_acc - Accept sequence abort | 12126 | * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort |
11951 | * @phba: Pointer to HBA context object. | 12127 | * @phba: Pointer to HBA context object. |
11952 | * @fc_hdr: pointer to a FC frame header. | 12128 | * @fc_hdr: pointer to a FC frame header. |
11953 | * | 12129 | * |
11954 | * This function sends a basic accept to a previous unsol sequence abort | 12130 | * This function sends a basic response to a previous unsol sequence abort |
11955 | * event after aborting the sequence handling. | 12131 | * event after aborting the sequence handling. |
11956 | **/ | 12132 | **/ |
11957 | static void | 12133 | static void |
11958 | lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | 12134 | lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, |
11959 | struct fc_frame_header *fc_hdr) | 12135 | struct fc_frame_header *fc_hdr) |
11960 | { | 12136 | { |
11961 | struct lpfc_iocbq *ctiocb = NULL; | 12137 | struct lpfc_iocbq *ctiocb = NULL; |
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11963 | uint16_t oxid, rxid; | 12139 | uint16_t oxid, rxid; |
11964 | uint32_t sid, fctl; | 12140 | uint32_t sid, fctl; |
11965 | IOCB_t *icmd; | 12141 | IOCB_t *icmd; |
12142 | int rc; | ||
11966 | 12143 | ||
11967 | if (!lpfc_is_link_up(phba)) | 12144 | if (!lpfc_is_link_up(phba)) |
11968 | return; | 12145 | return; |
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11983 | + phba->sli4_hba.max_cfg_param.xri_base)) | 12160 | + phba->sli4_hba.max_cfg_param.xri_base)) |
11984 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); | 12161 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); |
11985 | 12162 | ||
11986 | /* Allocate buffer for acc iocb */ | 12163 | /* Allocate buffer for rsp iocb */ |
11987 | ctiocb = lpfc_sli_get_iocbq(phba); | 12164 | ctiocb = lpfc_sli_get_iocbq(phba); |
11988 | if (!ctiocb) | 12165 | if (!ctiocb) |
11989 | return; | 12166 | return; |
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
12008 | 12185 | ||
12009 | ctiocb->iocb_cmpl = NULL; | 12186 | ctiocb->iocb_cmpl = NULL; |
12010 | ctiocb->vport = phba->pport; | 12187 | ctiocb->vport = phba->pport; |
12011 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; | 12188 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; |
12189 | ctiocb->sli4_xritag = NO_XRI; | ||
12190 | |||
12191 | /* If the oxid maps to the FCP XRI range or if it is out of range, | ||
12192 | * send a BLS_RJT. The driver no longer has that exchange. | ||
12193 | * Override the IOCB for a BA_RJT. | ||
12194 | */ | ||
12195 | if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + | ||
12196 | phba->sli4_hba.max_cfg_param.xri_base) || | ||
12197 | oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + | ||
12198 | phba->sli4_hba.max_cfg_param.xri_base)) { | ||
12199 | icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; | ||
12200 | bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); | ||
12201 | bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); | ||
12202 | bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); | ||
12203 | } | ||
12012 | 12204 | ||
12013 | if (fctl & FC_FC_EX_CTX) { | 12205 | if (fctl & FC_FC_EX_CTX) { |
12014 | /* ABTS sent by responder to CT exchange, construction | 12206 | /* ABTS sent by responder to CT exchange, construction |
12015 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG | 12207 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG |
12016 | * field and RX_ID from ABTS for RX_ID field. | 12208 | * field and RX_ID from ABTS for RX_ID field. |
12017 | */ | 12209 | */ |
12018 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); | 12210 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); |
12019 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); | 12211 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); |
12020 | ctiocb->sli4_xritag = oxid; | ||
12021 | } else { | 12212 | } else { |
12022 | /* ABTS sent by initiator to CT exchange, construction | 12213 | /* ABTS sent by initiator to CT exchange, construction |
12023 | * of BA_ACC will need to allocate a new XRI as for the | 12214 | * of BA_ACC will need to allocate a new XRI as for the |
12024 | * XRI_TAG and RX_ID fields. | 12215 | * XRI_TAG and RX_ID fields. |
12025 | */ | 12216 | */ |
12026 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); | 12217 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); |
12027 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); | 12218 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); |
12028 | ctiocb->sli4_xritag = NO_XRI; | ||
12029 | } | 12219 | } |
12030 | bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); | 12220 | bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); |
12031 | 12221 | ||
12032 | /* Xmit CT abts accept on exchange <xid> */ | 12222 | /* Xmit CT abts response on exchange <xid> */ |
12033 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 12223 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
12034 | "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", | 12224 | "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", |
12035 | CMD_XMIT_BLS_RSP64_CX, phba->link_state); | 12225 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); |
12036 | lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | 12226 | |
12227 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | ||
12228 | if (rc == IOCB_ERROR) { | ||
12229 | lpfc_printf_log(phba, KERN_ERR, LOG_ELS, | ||
12230 | "2925 Failed to issue CT ABTS RSP x%x on " | ||
12231 | "xri x%x, Data x%x\n", | ||
12232 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, | ||
12233 | phba->link_state); | ||
12234 | lpfc_sli_release_iocbq(phba, ctiocb); | ||
12235 | } | ||
12037 | } | 12236 | } |
12038 | 12237 | ||
12039 | /** | 12238 | /** |
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, | |||
12081 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 12280 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
12082 | } | 12281 | } |
12083 | /* Send basic accept (BA_ACC) to the abort requester */ | 12282 | /* Send basic accept (BA_ACC) to the abort requester */ |
12084 | lpfc_sli4_seq_abort_acc(phba, &fc_hdr); | 12283 | lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); |
12085 | } | 12284 | } |
12086 | 12285 | ||
12087 | /** | 12286 | /** |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 595056b89608..1a3cbf88f2ce 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0a4d376dbca5..2404d1d65563 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.21" | 21 | #define LPFC_DRIVER_VERSION "8.3.22" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index e8a6f1cf1e4b..5e001ffd4c13 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -1748,6 +1748,54 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) | |||
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | /** | 1750 | /** |
1751 | * _base_display_hp_branding - Display branding string | ||
1752 | * @ioc: per adapter object | ||
1753 | * | ||
1754 | * Return nothing. | ||
1755 | */ | ||
1756 | static void | ||
1757 | _base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc) | ||
1758 | { | ||
1759 | if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID) | ||
1760 | return; | ||
1761 | |||
1762 | switch (ioc->pdev->device) { | ||
1763 | case MPI2_MFGPAGE_DEVID_SAS2004: | ||
1764 | switch (ioc->pdev->subsystem_device) { | ||
1765 | case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: | ||
1766 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1767 | MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); | ||
1768 | break; | ||
1769 | default: | ||
1770 | break; | ||
1771 | } | ||
1772 | case MPI2_MFGPAGE_DEVID_SAS2308_2: | ||
1773 | switch (ioc->pdev->subsystem_device) { | ||
1774 | case MPT2SAS_HP_2_4_INTERNAL_SSDID: | ||
1775 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1776 | MPT2SAS_HP_2_4_INTERNAL_BRANDING); | ||
1777 | break; | ||
1778 | case MPT2SAS_HP_2_4_EXTERNAL_SSDID: | ||
1779 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1780 | MPT2SAS_HP_2_4_EXTERNAL_BRANDING); | ||
1781 | break; | ||
1782 | case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: | ||
1783 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1784 | MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); | ||
1785 | break; | ||
1786 | case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: | ||
1787 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1788 | MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); | ||
1789 | break; | ||
1790 | default: | ||
1791 | break; | ||
1792 | } | ||
1793 | default: | ||
1794 | break; | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | /** | ||
1751 | * _base_display_ioc_capabilities - Display IOC's capabilities. | 1799 |
1752 | * @ioc: per adapter object | 1800 | * @ioc: per adapter object |
1753 | * | 1801 | * |
@@ -1778,6 +1826,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
1778 | 1826 | ||
1779 | _base_display_dell_branding(ioc); | 1827 | _base_display_dell_branding(ioc); |
1780 | _base_display_intel_branding(ioc); | 1828 | _base_display_intel_branding(ioc); |
1829 | _base_display_hp_branding(ioc); | ||
1781 | 1830 | ||
1782 | printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); | 1831 | printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); |
1783 | 1832 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index a3f8aa9baea4..500328245f61 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -168,6 +168,26 @@ | |||
168 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E | 168 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E |
169 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F | 169 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F |
170 | 170 | ||
171 | |||
172 | /* | ||
173 | * HP HBA branding | ||
174 | */ | ||
175 | #define MPT2SAS_HP_3PAR_SSVID 0x1590 | ||
176 | #define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter" | ||
177 | #define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter" | ||
178 | #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter" | ||
179 | #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter" | ||
180 | #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter" | ||
181 | |||
182 | /* | ||
183 | * HO HBA SSDIDs | ||
184 | */ | ||
185 | #define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 | ||
186 | #define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 | ||
187 | #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 | ||
188 | #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 | ||
189 | #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 | ||
190 | |||
171 | /* | 191 | /* |
172 | * per target private data | 192 | * per target private data |
173 | */ | 193 | */ |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 19ad34f381a5..938d045e4180 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
@@ -663,6 +663,13 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { | |||
663 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, | 663 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, |
664 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, | 664 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, |
665 | { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, | 665 | { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, |
666 | { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, | ||
667 | { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, | ||
668 | { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, | ||
669 | { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, | ||
670 | { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, | ||
671 | { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, | ||
672 | { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, | ||
666 | 673 | ||
667 | { } /* terminate list */ | 674 | { } /* terminate list */ |
668 | }; | 675 | }; |
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 2fc0045b1a52..c1f8d1b150f7 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -53,6 +53,9 @@ | |||
53 | #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 | 53 | #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #define ISP4XXX_PCI_FN_1 0x1 | ||
57 | #define ISP4XXX_PCI_FN_2 0x3 | ||
58 | |||
56 | #define QLA_SUCCESS 0 | 59 | #define QLA_SUCCESS 0 |
57 | #define QLA_ERROR 1 | 60 | #define QLA_ERROR 1 |
58 | 61 | ||
@@ -233,9 +236,6 @@ struct ddb_entry { | |||
233 | 236 | ||
234 | unsigned long flags; /* DDB Flags */ | 237 | unsigned long flags; /* DDB Flags */ |
235 | 238 | ||
236 | unsigned long dev_scan_wait_to_start_relogin; | ||
237 | unsigned long dev_scan_wait_to_complete_relogin; | ||
238 | |||
239 | uint16_t fw_ddb_index; /* DDB firmware index */ | 239 | uint16_t fw_ddb_index; /* DDB firmware index */ |
240 | uint16_t options; | 240 | uint16_t options; |
241 | uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ | 241 | uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ |
@@ -289,8 +289,6 @@ struct ddb_entry { | |||
289 | * DDB flags. | 289 | * DDB flags. |
290 | */ | 290 | */ |
291 | #define DF_RELOGIN 0 /* Relogin to device */ | 291 | #define DF_RELOGIN 0 /* Relogin to device */ |
292 | #define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL | ||
293 | * logged it out */ | ||
294 | #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ | 292 | #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ |
295 | #define DF_FO_MASKED 3 | 293 | #define DF_FO_MASKED 3 |
296 | 294 | ||
@@ -376,7 +374,7 @@ struct scsi_qla_host { | |||
376 | #define AF_LINK_UP 8 /* 0x00000100 */ | 374 | #define AF_LINK_UP 8 /* 0x00000100 */ |
377 | #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ | 375 | #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ |
378 | #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ | 376 | #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ |
379 | #define AF_HBA_GOING_AWAY 12 /* 0x00001000 */ | 377 | #define AF_HA_REMOVAL 12 /* 0x00001000 */ |
380 | #define AF_INTx_ENABLED 15 /* 0x00008000 */ | 378 | #define AF_INTx_ENABLED 15 /* 0x00008000 */ |
381 | #define AF_MSI_ENABLED 16 /* 0x00010000 */ | 379 | #define AF_MSI_ENABLED 16 /* 0x00010000 */ |
382 | #define AF_MSIX_ENABLED 17 /* 0x00020000 */ | 380 | #define AF_MSIX_ENABLED 17 /* 0x00020000 */ |
@@ -479,7 +477,6 @@ struct scsi_qla_host { | |||
479 | uint32_t timer_active; | 477 | uint32_t timer_active; |
480 | 478 | ||
481 | /* Recovery Timers */ | 479 | /* Recovery Timers */ |
482 | uint32_t discovery_wait; | ||
483 | atomic_t check_relogin_timeouts; | 480 | atomic_t check_relogin_timeouts; |
484 | uint32_t retry_reset_ha_cnt; | 481 | uint32_t retry_reset_ha_cnt; |
485 | uint32_t isp_reset_timer; /* reset test timer */ | 482 | uint32_t isp_reset_timer; /* reset test timer */ |
@@ -765,6 +762,5 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) | |||
765 | /* Defines for process_aen() */ | 762 | /* Defines for process_aen() */ |
766 | #define PROCESS_ALL_AENS 0 | 763 | #define PROCESS_ALL_AENS 0 |
767 | #define FLUSH_DDB_CHANGED_AENS 1 | 764 | #define FLUSH_DDB_CHANGED_AENS 1 |
768 | #define RELOGIN_DDB_CHANGED_AENS 2 | ||
769 | 765 | ||
770 | #endif /*_QLA4XXX_H */ | 766 | #endif /*_QLA4XXX_H */ |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index c1985792f034..31e2bf97198c 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
@@ -455,6 +455,7 @@ struct addr_ctrl_blk { | |||
455 | uint8_t res0; /* 07 */ | 455 | uint8_t res0; /* 07 */ |
456 | uint16_t eth_mtu_size; /* 08-09 */ | 456 | uint16_t eth_mtu_size; /* 08-09 */ |
457 | uint16_t add_fw_options; /* 0A-0B */ | 457 | uint16_t add_fw_options; /* 0A-0B */ |
458 | #define SERIALIZE_TASK_MGMT 0x0400 | ||
458 | 459 | ||
459 | uint8_t hb_interval; /* 0C */ | 460 | uint8_t hb_interval; /* 0C */ |
460 | uint8_t inst_num; /* 0D */ | 461 | uint8_t inst_num; /* 0D */ |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 8fad99b7eef4..cc53e3fbd78c 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
@@ -136,7 +136,6 @@ void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); | |||
136 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); | 136 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); |
137 | 137 | ||
138 | extern int ql4xextended_error_logging; | 138 | extern int ql4xextended_error_logging; |
139 | extern int ql4xdiscoverywait; | ||
140 | extern int ql4xdontresethba; | 139 | extern int ql4xdontresethba; |
141 | extern int ql4xenablemsix; | 140 | extern int ql4xenablemsix; |
142 | 141 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1629c48c35ef..bbb2e903d38a 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -723,13 +723,38 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err) | |||
723 | return relogin; | 723 | return relogin; |
724 | } | 724 | } |
725 | 725 | ||
726 | static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) | ||
727 | { | ||
728 | unsigned long wtime; | ||
729 | |||
730 | /* Flush the 0x8014 AEN from the firmware as a result of | ||
731 | * Auto connect. We are basically doing get_firmware_ddb() | ||
732 | * to determine whether we need to log back in or not. | ||
733 | * Trying to do a set ddb before we have processed 0x8014 | ||
734 | * will result in another set_ddb() for the same ddb. In other | ||
735 | * words there will be stale entries in the aen_q. | ||
736 | */ | ||
737 | wtime = jiffies + (2 * HZ); | ||
738 | do { | ||
739 | if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) | ||
740 | if (ha->firmware_state & (BIT_2 | BIT_0)) | ||
741 | return; | ||
742 | |||
743 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
744 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | ||
745 | |||
746 | msleep(1000); | ||
747 | } while (!time_after_eq(jiffies, wtime)); | ||
748 | } | ||
749 | |||
726 | /** | 750 | /** |
727 | * qla4xxx_configure_ddbs - builds driver ddb list | 751 | * qla4xxx_build_ddb_list - builds driver ddb list |
728 | * @ha: Pointer to host adapter structure. | 752 | * @ha: Pointer to host adapter structure. |
729 | * | 753 | * |
730 | * This routine searches for all valid firmware ddb entries and builds | 754 | * This routine searches for all valid firmware ddb entries and builds |
731 | * an internal ddb list. Ddbs that are considered valid are those with | 755 | * an internal ddb list. Ddbs that are considered valid are those with |
732 | * a device state of SESSION_ACTIVE. | 756 | * a device state of SESSION_ACTIVE. |
757 | * A relogin (set_ddb) is issued for DDBs that are not online. | ||
733 | **/ | 758 | **/ |
734 | static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) | 759 | static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) |
735 | { | 760 | { |
@@ -744,6 +769,8 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) | |||
744 | uint32_t ipv6_device; | 769 | uint32_t ipv6_device; |
745 | uint32_t new_tgt; | 770 | uint32_t new_tgt; |
746 | 771 | ||
772 | qla4xxx_flush_AENS(ha); | ||
773 | |||
747 | fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), | 774 | fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), |
748 | &fw_ddb_entry_dma, GFP_KERNEL); | 775 | &fw_ddb_entry_dma, GFP_KERNEL); |
749 | if (fw_ddb_entry == NULL) { | 776 | if (fw_ddb_entry == NULL) { |
@@ -847,144 +874,6 @@ exit_build_ddb_list_no_free: | |||
847 | return status; | 874 | return status; |
848 | } | 875 | } |
849 | 876 | ||
850 | struct qla4_relog_scan { | ||
851 | int halt_wait; | ||
852 | uint32_t conn_err; | ||
853 | uint32_t fw_ddb_index; | ||
854 | uint32_t next_fw_ddb_index; | ||
855 | uint32_t fw_ddb_device_state; | ||
856 | }; | ||
857 | |||
858 | static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) | ||
859 | { | ||
860 | struct ddb_entry *ddb_entry; | ||
861 | |||
862 | if (qla4_is_relogin_allowed(ha, rs->conn_err)) { | ||
863 | /* We either have a device that is in | ||
864 | * the process of relogging in or a | ||
865 | * device that is waiting to be | ||
866 | * relogged in */ | ||
867 | rs->halt_wait = 0; | ||
868 | |||
869 | ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, | ||
870 | rs->fw_ddb_index); | ||
871 | if (ddb_entry == NULL) | ||
872 | return QLA_ERROR; | ||
873 | |||
874 | if (ddb_entry->dev_scan_wait_to_start_relogin != 0 | ||
875 | && time_after_eq(jiffies, | ||
876 | ddb_entry-> | ||
877 | dev_scan_wait_to_start_relogin)) | ||
878 | { | ||
879 | ddb_entry->dev_scan_wait_to_start_relogin = 0; | ||
880 | qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0); | ||
881 | } | ||
882 | } | ||
883 | return QLA_SUCCESS; | ||
884 | } | ||
885 | |||
886 | static int qla4_scan_for_relogin(struct scsi_qla_host *ha, | ||
887 | struct qla4_relog_scan *rs) | ||
888 | { | ||
889 | int error; | ||
890 | |||
891 | /* scan for relogins | ||
892 | * ----------------- */ | ||
893 | for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES; | ||
894 | rs->fw_ddb_index = rs->next_fw_ddb_index) { | ||
895 | if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0, | ||
896 | NULL, &rs->next_fw_ddb_index, | ||
897 | &rs->fw_ddb_device_state, | ||
898 | &rs->conn_err, NULL, NULL) | ||
899 | == QLA_ERROR) | ||
900 | return QLA_ERROR; | ||
901 | |||
902 | if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS) | ||
903 | rs->halt_wait = 0; | ||
904 | |||
905 | if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED || | ||
906 | rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) { | ||
907 | error = qla4_test_rdy(ha, rs); | ||
908 | if (error) | ||
909 | return error; | ||
910 | } | ||
911 | |||
912 | /* We know we've reached the last device when | ||
913 | * next_fw_ddb_index is 0 */ | ||
914 | if (rs->next_fw_ddb_index == 0) | ||
915 | break; | ||
916 | } | ||
917 | return QLA_SUCCESS; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * qla4xxx_devices_ready - wait for target devices to be logged in | ||
922 | * @ha: pointer to adapter structure | ||
923 | * | ||
924 | * This routine waits up to ql4xdiscoverywait seconds for targets | ||
925 | * to be logged into the F/W database during driver load time. | ||
926 | **/ | ||
927 | static int qla4xxx_devices_ready(struct scsi_qla_host *ha) | ||
928 | { | ||
929 | int error; | ||
930 | unsigned long discovery_wtime; | ||
931 | struct qla4_relog_scan rs; | ||
932 | |||
933 | discovery_wtime = jiffies + (ql4xdiscoverywait * HZ); | ||
934 | |||
935 | DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait)); | ||
936 | do { | ||
937 | /* poll for AEN. */ | ||
938 | qla4xxx_get_firmware_state(ha); | ||
939 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) { | ||
940 | /* Set time-between-relogin timer */ | ||
941 | qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS); | ||
942 | } | ||
943 | |||
944 | /* if no relogins active or needed, halt discovery wait */ | ||
945 | rs.halt_wait = 1; | ||
946 | |||
947 | error = qla4_scan_for_relogin(ha, &rs); | ||
948 | |||
949 | if (rs.halt_wait) { | ||
950 | DEBUG2(printk("scsi%ld: %s: Delay halted. Devices " | ||
951 | "Ready.\n", ha->host_no, __func__)); | ||
952 | return QLA_SUCCESS; | ||
953 | } | ||
954 | |||
955 | msleep(2000); | ||
956 | } while (!time_after_eq(jiffies, discovery_wtime)); | ||
957 | |||
958 | DEBUG3(qla4xxx_get_conn_event_log(ha)); | ||
959 | |||
960 | return QLA_SUCCESS; | ||
961 | } | ||
962 | |||
963 | static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) | ||
964 | { | ||
965 | unsigned long wtime; | ||
966 | |||
967 | /* Flush the 0x8014 AEN from the firmware as a result of | ||
968 | * Auto connect. We are basically doing get_firmware_ddb() | ||
969 | * to determine whether we need to log back in or not. | ||
970 | * Trying to do a set ddb before we have processed 0x8014 | ||
971 | * will result in another set_ddb() for the same ddb. In other | ||
972 | * words there will be stale entries in the aen_q. | ||
973 | */ | ||
974 | wtime = jiffies + (2 * HZ); | ||
975 | do { | ||
976 | if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) | ||
977 | if (ha->firmware_state & (BIT_2 | BIT_0)) | ||
978 | return; | ||
979 | |||
980 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
981 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | ||
982 | |||
983 | msleep(1000); | ||
984 | } while (!time_after_eq(jiffies, wtime)); | ||
985 | |||
986 | } | ||
987 | |||
988 | static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) | 877 | static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) |
989 | { | 878 | { |
990 | uint16_t fw_ddb_index; | 879 | uint16_t fw_ddb_index; |
@@ -996,29 +885,12 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) | |||
996 | 885 | ||
997 | for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) | 886 | for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) |
998 | ha->fw_ddb_index_map[fw_ddb_index] = | 887 | ha->fw_ddb_index_map[fw_ddb_index] = |
999 | (struct ddb_entry *)INVALID_ENTRY; | 888 | (struct ddb_entry *)INVALID_ENTRY; |
1000 | 889 | ||
1001 | ha->tot_ddbs = 0; | 890 | ha->tot_ddbs = 0; |
1002 | 891 | ||
1003 | qla4xxx_flush_AENS(ha); | 892 | /* Perform device discovery and build ddb list. */ |
1004 | 893 | status = qla4xxx_build_ddb_list(ha); | |
1005 | /* Wait for an AEN */ | ||
1006 | qla4xxx_devices_ready(ha); | ||
1007 | |||
1008 | /* | ||
1009 | * First perform device discovery for active | ||
1010 | * fw ddb indexes and build | ||
1011 | * ddb list. | ||
1012 | */ | ||
1013 | if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) | ||
1014 | return status; | ||
1015 | |||
1016 | /* | ||
1017 | * Targets can come online after the inital discovery, so processing | ||
1018 | * the aens here will catch them. | ||
1019 | */ | ||
1020 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
1021 | qla4xxx_process_aen(ha, PROCESS_ALL_AENS); | ||
1022 | 894 | ||
1023 | return status; | 895 | return status; |
1024 | } | 896 | } |
@@ -1537,7 +1409,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1537 | uint32_t state, uint32_t conn_err) | 1409 | uint32_t state, uint32_t conn_err) |
1538 | { | 1410 | { |
1539 | struct ddb_entry * ddb_entry; | 1411 | struct ddb_entry * ddb_entry; |
1540 | uint32_t old_fw_ddb_device_state; | ||
1541 | 1412 | ||
1542 | /* check for out of range index */ | 1413 | /* check for out of range index */ |
1543 | if (fw_ddb_index >= MAX_DDB_ENTRIES) | 1414 | if (fw_ddb_index >= MAX_DDB_ENTRIES) |
@@ -1553,27 +1424,18 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1553 | } | 1424 | } |
1554 | 1425 | ||
1555 | /* Device already exists in our database. */ | 1426 | /* Device already exists in our database. */ |
1556 | old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; | ||
1557 | DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " | 1427 | DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " |
1558 | "index [%d]\n", ha->host_no, __func__, | 1428 | "index [%d]\n", ha->host_no, __func__, |
1559 | ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); | 1429 | ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); |
1560 | if (old_fw_ddb_device_state == state && | ||
1561 | state == DDB_DS_SESSION_ACTIVE) { | ||
1562 | if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { | ||
1563 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1564 | iscsi_unblock_session(ddb_entry->sess); | ||
1565 | } | ||
1566 | return QLA_SUCCESS; | ||
1567 | } | ||
1568 | 1430 | ||
1569 | ddb_entry->fw_ddb_device_state = state; | 1431 | ddb_entry->fw_ddb_device_state = state; |
1570 | /* Device is back online. */ | 1432 | /* Device is back online. */ |
1571 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { | 1433 | if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && |
1434 | (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { | ||
1572 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | 1435 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); |
1573 | atomic_set(&ddb_entry->relogin_retry_count, 0); | 1436 | atomic_set(&ddb_entry->relogin_retry_count, 0); |
1574 | atomic_set(&ddb_entry->relogin_timer, 0); | 1437 | atomic_set(&ddb_entry->relogin_timer, 0); |
1575 | clear_bit(DF_RELOGIN, &ddb_entry->flags); | 1438 | clear_bit(DF_RELOGIN, &ddb_entry->flags); |
1576 | clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); | ||
1577 | iscsi_unblock_session(ddb_entry->sess); | 1439 | iscsi_unblock_session(ddb_entry->sess); |
1578 | iscsi_session_event(ddb_entry->sess, | 1440 | iscsi_session_event(ddb_entry->sess, |
1579 | ISCSI_KEVENT_CREATE_SESSION); | 1441 | ISCSI_KEVENT_CREATE_SESSION); |
@@ -1581,7 +1443,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1581 | * Change the lun state to READY in case the lun TIMEOUT before | 1443 | * Change the lun state to READY in case the lun TIMEOUT before |
1582 | * the device came back. | 1444 | * the device came back. |
1583 | */ | 1445 | */ |
1584 | } else { | 1446 | } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { |
1585 | /* Device went away, mark device missing */ | 1447 | /* Device went away, mark device missing */ |
1586 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { | 1448 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { |
1587 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " | 1449 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " |
@@ -1598,7 +1460,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1598 | */ | 1460 | */ |
1599 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && | 1461 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && |
1600 | !test_bit(DF_RELOGIN, &ddb_entry->flags) && | 1462 | !test_bit(DF_RELOGIN, &ddb_entry->flags) && |
1601 | !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && | ||
1602 | qla4_is_relogin_allowed(ha, conn_err)) { | 1463 | qla4_is_relogin_allowed(ha, conn_err)) { |
1603 | /* | 1464 | /* |
1604 | * This triggers a relogin. After the relogin_timer | 1465 | * This triggers a relogin. After the relogin_timer |
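The ql4_init.c hunk above folds the old "already active" early return into one combined condition: a session is moved online only when firmware reports SESSION_ACTIVE and the driver-side state is not already online, while any non-active state marks the device missing and may schedule a relogin. A minimal userspace sketch of that transition logic (all identifiers are hypothetical, not the driver's):

/* Illustrative sketch of the consolidated state-change handling. */
#include <stdio.h>

enum fw_state  { FW_SESSION_ACTIVE, FW_SESSION_FAILED };
enum drv_state { DRV_ONLINE, DRV_MISSING };

struct session {
    enum fw_state  fw_state;
    enum drv_state drv_state;
    int relogin_retries;
};

/* Apply a firmware state-change event to the driver-side session state. */
static void process_state_change(struct session *s, enum fw_state new_state)
{
    s->fw_state = new_state;

    if (new_state == FW_SESSION_ACTIVE && s->drv_state != DRV_ONLINE) {
        /* Device came (back) online: reset bookkeeping, unblock I/O. */
        s->drv_state = DRV_ONLINE;
        s->relogin_retries = 0;
        printf("session unblocked\n");
    } else if (new_state != FW_SESSION_ACTIVE) {
        /* Device went away: mark it missing; a relogin may follow. */
        if (s->drv_state == DRV_ONLINE) {
            s->drv_state = DRV_MISSING;
            printf("session marked missing\n");
        }
    }
    /* Already ACTIVE and already ONLINE: nothing to do -- the removed
     * early-return special case is covered by the combined condition. */
}

int main(void)
{
    struct session s = { FW_SESSION_FAILED, DRV_MISSING, 3 };
    process_state_change(&s, FW_SESSION_ACTIVE);   /* -> unblocked */
    process_state_change(&s, FW_SESSION_ACTIVE);   /* already online: no-op */
    process_state_change(&s, FW_SESSION_FAILED);   /* -> marked missing */
    return 0;
}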
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 03e028e6e809..2f40ac761cd4 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -801,7 +801,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id) | |||
801 | &ha->reg->ctrl_status); | 801 | &ha->reg->ctrl_status); |
802 | readl(&ha->reg->ctrl_status); | 802 | readl(&ha->reg->ctrl_status); |
803 | 803 | ||
804 | if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags)) | 804 | if (!test_bit(AF_HA_REMOVAL, &ha->flags)) |
805 | set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); | 805 | set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); |
806 | 806 | ||
807 | break; | 807 | break; |
@@ -1008,34 +1008,9 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) | |||
1008 | mbox_sts[0], mbox_sts[2], | 1008 | mbox_sts[0], mbox_sts[2], |
1009 | mbox_sts[3])); | 1009 | mbox_sts[3])); |
1010 | break; | 1010 | break; |
1011 | } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) { | ||
1012 | /* for use during init time, we only want to | ||
1013 | * relogin non-active ddbs */ | ||
1014 | struct ddb_entry *ddb_entry; | ||
1015 | |||
1016 | ddb_entry = | ||
1017 | /* FIXME: name length? */ | ||
1018 | qla4xxx_lookup_ddb_by_fw_index(ha, | ||
1019 | mbox_sts[2]); | ||
1020 | if (!ddb_entry) | ||
1021 | break; | ||
1022 | |||
1023 | ddb_entry->dev_scan_wait_to_complete_relogin = | ||
1024 | 0; | ||
1025 | ddb_entry->dev_scan_wait_to_start_relogin = | ||
1026 | jiffies + | ||
1027 | ((ddb_entry->default_time2wait + | ||
1028 | 4) * HZ); | ||
1029 | |||
1030 | DEBUG2(printk("scsi%ld: ddb [%d] initiate" | ||
1031 | " RELOGIN after %d seconds\n", | ||
1032 | ha->host_no, | ||
1033 | ddb_entry->fw_ddb_index, | ||
1034 | ddb_entry->default_time2wait + | ||
1035 | 4)); | ||
1036 | break; | ||
1037 | } | 1011 | } |
1038 | 1012 | case PROCESS_ALL_AENS: | |
1013 | default: | ||
1039 | if (mbox_sts[1] == 0) { /* Global DB change. */ | 1014 | if (mbox_sts[1] == 0) { /* Global DB change. */ |
1040 | qla4xxx_reinitialize_ddb_list(ha); | 1015 | qla4xxx_reinitialize_ddb_list(ha); |
1041 | } else if (mbox_sts[1] == 1) { /* Specific device. */ | 1016 | } else if (mbox_sts[1] == 1) { /* Specific device. */ |
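The ql4_isr.c hunk removes the init-time RELOGIN_DDB_CHANGED_AENS branch and stacks PROCESS_ALL_AENS with the default label so both take the common DDB-changed path. An illustrative sketch of that case/default consolidation (names hypothetical):

#include <stdio.h>

enum aen_mode { FLUSH_DDB_CHANGED_AENS, PROCESS_ALL_AENS };

static void process_aen(enum aen_mode mode, int db_change)
{
    switch (mode) {
    case FLUSH_DDB_CHANGED_AENS:
        printf("flushing AEN, change ignored\n");
        break;
    case PROCESS_ALL_AENS:
    default:
        /* The common path now handles both the explicit mode and any
         * future/unknown mode in one place. */
        if (db_change == 0)
            printf("global DB change: reinitialize list\n");
        else
            printf("specific device change\n");
        break;
    }
}

int main(void)
{
    process_aen(FLUSH_DDB_CHANGED_AENS, 0);
    process_aen(PROCESS_ALL_AENS, 1);
    return 0;
}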
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index f65626aec7c1..f9d81c8372c3 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -32,6 +32,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
32 | u_long wait_count; | 32 | u_long wait_count; |
33 | uint32_t intr_status; | 33 | uint32_t intr_status; |
34 | unsigned long flags = 0; | 34 | unsigned long flags = 0; |
35 | uint32_t dev_state; | ||
35 | 36 | ||
36 | /* Make sure that pointers are valid */ | 37 | /* Make sure that pointers are valid */ |
37 | if (!mbx_cmd || !mbx_sts) { | 38 | if (!mbx_cmd || !mbx_sts) { |
@@ -40,12 +41,23 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
40 | return status; | 41 | return status; |
41 | } | 42 | } |
42 | 43 | ||
43 | if (is_qla8022(ha) && | 44 | if (is_qla8022(ha)) { |
44 | test_bit(AF_FW_RECOVERY, &ha->flags)) { | 45 | if (test_bit(AF_FW_RECOVERY, &ha->flags)) { |
45 | DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " | 46 | DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " |
46 | "completing mbx cmd as firmware recovery detected\n", | 47 | "prematurely completing mbx cmd as firmware " |
47 | ha->host_no, __func__)); | 48 | "recovery detected\n", ha->host_no, __func__)); |
48 | return status; | 49 | return status; |
50 | } | ||
51 | /* Do not send any mbx cmd if h/w is in failed state*/ | ||
52 | qla4_8xxx_idc_lock(ha); | ||
53 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
54 | qla4_8xxx_idc_unlock(ha); | ||
55 | if (dev_state == QLA82XX_DEV_FAILED) { | ||
56 | ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " | ||
57 | "failed state, do not send any mailbox commands\n", | ||
58 | ha->host_no, __func__); | ||
59 | return status; | ||
60 | } | ||
49 | } | 61 | } |
50 | 62 | ||
51 | if ((is_aer_supported(ha)) && | 63 | if ((is_aer_supported(ha)) && |
@@ -139,7 +151,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
139 | if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && | 151 | if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && |
140 | test_bit(AF_INTERRUPTS_ON, &ha->flags) && | 152 | test_bit(AF_INTERRUPTS_ON, &ha->flags) && |
141 | test_bit(AF_ONLINE, &ha->flags) && | 153 | test_bit(AF_ONLINE, &ha->flags) && |
142 | !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { | 154 | !test_bit(AF_HA_REMOVAL, &ha->flags)) { |
143 | /* Do not poll for completion. Use completion queue */ | 155 | /* Do not poll for completion. Use completion queue */ |
144 | set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); | 156 | set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); |
145 | wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); | 157 | wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); |
@@ -395,9 +407,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, | |||
395 | /*memcpy(ha->alias, init_fw_cb->Alias, | 407 | /*memcpy(ha->alias, init_fw_cb->Alias, |
396 | min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ | 408 | min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ |
397 | 409 | ||
398 | /* Save Command Line Paramater info */ | ||
399 | ha->discovery_wait = ql4xdiscoverywait; | ||
400 | |||
401 | if (ha->acb_version == ACB_SUPPORTED) { | 410 | if (ha->acb_version == ACB_SUPPORTED) { |
402 | ha->ipv6_options = init_fw_cb->ipv6_opts; | 411 | ha->ipv6_options = init_fw_cb->ipv6_opts; |
403 | ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; | 412 | ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; |
@@ -467,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) | |||
467 | 476 | ||
468 | init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); | 477 | init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); |
469 | 478 | ||
479 | /* Set bit for "serialize task mgmt" all other bits need to be zero */ | ||
480 | init_fw_cb->add_fw_options = 0; | ||
481 | init_fw_cb->add_fw_options |= | ||
482 | __constant_cpu_to_le16(SERIALIZE_TASK_MGMT); | ||
483 | |||
470 | if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) | 484 | if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) |
471 | != QLA_SUCCESS) { | 485 | != QLA_SUCCESS) { |
472 | DEBUG2(printk(KERN_WARNING | 486 | DEBUG2(printk(KERN_WARNING |
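Two changes land in ql4_mbx.c: mailbox commands are refused outright when the ISP82xx device state, read under the IDC lock, reports failed hardware, and the init firmware control block now sets only the SERIALIZE_TASK_MGMT additional option. A userspace sketch of the locked state check, with hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

enum { DEV_READY, DEV_FAILED };

static pthread_mutex_t idc_lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_state = DEV_READY;

static int issue_cmd(const char *cmd)
{
    int state;

    /* Snapshot the shared device state under the lock, then decide. */
    pthread_mutex_lock(&idc_lock);
    state = dev_state;
    pthread_mutex_unlock(&idc_lock);

    if (state == DEV_FAILED) {
        fprintf(stderr, "H/W in failed state, not sending \"%s\"\n", cmd);
        return -1;
    }
    printf("sending \"%s\"\n", cmd);
    return 0;
}

int main(void)
{
    issue_cmd("GET_FW_STATE");

    pthread_mutex_lock(&idc_lock);
    dev_state = DEV_FAILED;
    pthread_mutex_unlock(&idc_lock);

    return issue_cmd("GET_FW_STATE") ? 1 : 0;   /* second call is refused */
}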
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 3d5ef2df4134..35381cb0936e 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
@@ -2304,14 +2304,13 @@ qla4_8xxx_enable_intrs(struct scsi_qla_host *ha) | |||
2304 | void | 2304 | void |
2305 | qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) | 2305 | qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) |
2306 | { | 2306 | { |
2307 | if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) | 2307 | if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) |
2308 | qla4_8xxx_mbx_intr_disable(ha); | 2308 | qla4_8xxx_mbx_intr_disable(ha); |
2309 | 2309 | ||
2310 | spin_lock_irq(&ha->hardware_lock); | 2310 | spin_lock_irq(&ha->hardware_lock); |
2311 | /* BIT 10 - set */ | 2311 | /* BIT 10 - set */ |
2312 | qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); | 2312 | qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); |
2313 | spin_unlock_irq(&ha->hardware_lock); | 2313 | spin_unlock_irq(&ha->hardware_lock); |
2314 | clear_bit(AF_INTERRUPTS_ON, &ha->flags); | ||
2315 | } | 2314 | } |
2316 | 2315 | ||
2317 | struct ql4_init_msix_entry { | 2316 | struct ql4_init_msix_entry { |
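In ql4_nx.c the separate test_bit/clear_bit pair becomes a single test_and_clear_bit, so observing and clearing AF_INTERRUPTS_ON is one atomic step with no window in between. A small C11 sketch of the same test-and-clear idiom (flag name and helper are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define INTERRUPTS_ON (1u << 0)

static bool test_and_clear_flag(atomic_uint *f, unsigned int bit)
{
    /* atomic_fetch_and clears the bit and returns the previous value,
     * so the test and the clear cannot be separated by another thread. */
    unsigned int old = atomic_fetch_and(f, ~bit);
    return (old & bit) != 0;
}

int main(void)
{
    atomic_uint flags;

    atomic_init(&flags, INTERRUPTS_ON);

    if (test_and_clear_flag(&flags, INTERRUPTS_ON))
        printf("interrupts were on, disabling\n");
    if (!test_and_clear_flag(&flags, INTERRUPTS_ON))
        printf("second call: flag already clear, nothing to do\n");
    return 0;
}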
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 967836ef5ab2..a4acb0dd7beb 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -29,10 +29,6 @@ static struct kmem_cache *srb_cachep; | |||
29 | /* | 29 | /* |
30 | * Module parameter information and variables | 30 | * Module parameter information and variables |
31 | */ | 31 | */ |
32 | int ql4xdiscoverywait = 60; | ||
33 | module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR); | ||
34 | MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time"); | ||
35 | |||
36 | int ql4xdontresethba = 0; | 32 | int ql4xdontresethba = 0; |
37 | module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); | 33 | module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); |
38 | MODULE_PARM_DESC(ql4xdontresethba, | 34 | MODULE_PARM_DESC(ql4xdontresethba, |
@@ -55,6 +51,17 @@ MODULE_PARM_DESC(ql4xenablemsix, | |||
55 | " 2 = enable MSI interrupt mechanism."); | 51 | " 2 = enable MSI interrupt mechanism."); |
56 | 52 | ||
57 | #define QL4_DEF_QDEPTH 32 | 53 | #define QL4_DEF_QDEPTH 32 |
54 | static int ql4xmaxqdepth = QL4_DEF_QDEPTH; | ||
55 | module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); | ||
56 | MODULE_PARM_DESC(ql4xmaxqdepth, | ||
57 | "Maximum queue depth to report for target devices.\n" | ||
58 | " Default: 32."); | ||
59 | |||
60 | static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; | ||
61 | module_param(ql4xsess_recovery_tmo, int, S_IRUGO); | ||
62 | MODULE_PARM_DESC(ql4xsess_recovery_tmo, | ||
63 | "Target Session Recovery Timeout.\n" | ||
64 | " Default: 30 sec."); | ||
58 | 65 | ||
59 | /* | 66 | /* |
60 | * SCSI host template entry points | 67 | * SCSI host template entry points |
@@ -165,7 +172,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) | |||
165 | DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " | 172 | DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " |
166 | "of (%d) secs exhausted, marking device DEAD.\n", | 173 | "of (%d) secs exhausted, marking device DEAD.\n", |
167 | ha->host_no, __func__, ddb_entry->fw_ddb_index, | 174 | ha->host_no, __func__, ddb_entry->fw_ddb_index, |
168 | QL4_SESS_RECOVERY_TMO)); | 175 | ddb_entry->sess->recovery_tmo)); |
169 | } | 176 | } |
170 | } | 177 | } |
171 | 178 | ||
@@ -295,7 +302,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry) | |||
295 | { | 302 | { |
296 | int err; | 303 | int err; |
297 | 304 | ||
298 | ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO; | 305 | ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo; |
299 | 306 | ||
300 | err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); | 307 | err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); |
301 | if (err) { | 308 | if (err) { |
@@ -753,12 +760,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) | |||
753 | if (!pci_channel_offline(ha->pdev)) | 760 | if (!pci_channel_offline(ha->pdev)) |
754 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); | 761 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); |
755 | 762 | ||
756 | if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { | ||
757 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n", | ||
758 | __func__)); | ||
759 | return; | ||
760 | } | ||
761 | |||
762 | if (is_qla8022(ha)) { | 763 | if (is_qla8022(ha)) { |
763 | qla4_8xxx_watchdog(ha); | 764 | qla4_8xxx_watchdog(ha); |
764 | } | 765 | } |
@@ -1067,7 +1068,6 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) | |||
1067 | 1068 | ||
1068 | /* Disable the board */ | 1069 | /* Disable the board */ |
1069 | ql4_printk(KERN_INFO, ha, "Disabling the board\n"); | 1070 | ql4_printk(KERN_INFO, ha, "Disabling the board\n"); |
1070 | set_bit(AF_HBA_GOING_AWAY, &ha->flags); | ||
1071 | 1071 | ||
1072 | qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); | 1072 | qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); |
1073 | qla4xxx_mark_all_devices_missing(ha); | 1073 | qla4xxx_mark_all_devices_missing(ha); |
@@ -1218,6 +1218,27 @@ recover_ha_init_adapter: | |||
1218 | return status; | 1218 | return status; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) | ||
1222 | { | ||
1223 | struct ddb_entry *ddb_entry, *dtemp; | ||
1224 | |||
1225 | list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { | ||
1226 | if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) || | ||
1227 | (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { | ||
1228 | if (ddb_entry->fw_ddb_device_state == | ||
1229 | DDB_DS_SESSION_ACTIVE) { | ||
1230 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1231 | ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" | ||
1232 | " marked ONLINE\n", ha->host_no, __func__, | ||
1233 | ddb_entry->fw_ddb_index); | ||
1234 | |||
1235 | iscsi_unblock_session(ddb_entry->sess); | ||
1236 | } else | ||
1237 | qla4xxx_relogin_device(ha, ddb_entry); | ||
1238 | } | ||
1239 | } | ||
1240 | } | ||
1241 | |||
1221 | void qla4xxx_wake_dpc(struct scsi_qla_host *ha) | 1242 | void qla4xxx_wake_dpc(struct scsi_qla_host *ha) |
1222 | { | 1243 | { |
1223 | if (ha->dpc_thread && | 1244 | if (ha->dpc_thread && |
@@ -1259,11 +1280,6 @@ static void qla4xxx_do_dpc(struct work_struct *work) | |||
1259 | goto do_dpc_exit; | 1280 | goto do_dpc_exit; |
1260 | } | 1281 | } |
1261 | 1282 | ||
1262 | /* HBA is in the process of being permanently disabled. | ||
1263 | * Don't process anything */ | ||
1264 | if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) | ||
1265 | return; | ||
1266 | |||
1267 | if (is_qla8022(ha)) { | 1283 | if (is_qla8022(ha)) { |
1268 | if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { | 1284 | if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { |
1269 | qla4_8xxx_idc_lock(ha); | 1285 | qla4_8xxx_idc_lock(ha); |
@@ -1331,13 +1347,7 @@ dpc_post_reset_ha: | |||
1331 | if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { | 1347 | if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { |
1332 | if (!test_bit(AF_LINK_UP, &ha->flags)) { | 1348 | if (!test_bit(AF_LINK_UP, &ha->flags)) { |
1333 | /* ---- link down? --- */ | 1349 | /* ---- link down? --- */ |
1334 | list_for_each_entry_safe(ddb_entry, dtemp, | 1350 | qla4xxx_mark_all_devices_missing(ha); |
1335 | &ha->ddb_list, list) { | ||
1336 | if (atomic_read(&ddb_entry->state) == | ||
1337 | DDB_STATE_ONLINE) | ||
1338 | qla4xxx_mark_device_missing(ha, | ||
1339 | ddb_entry); | ||
1340 | } | ||
1341 | } else { | 1351 | } else { |
1342 | /* ---- link up? --- * | 1352 | /* ---- link up? --- * |
1343 | * F/W will auto login to all devices ONLY ONCE after | 1353 | * F/W will auto login to all devices ONLY ONCE after |
@@ -1346,30 +1356,7 @@ dpc_post_reset_ha: | |||
1346 | * manually relogin to devices when recovering from | 1356 | * manually relogin to devices when recovering from |
1347 | * connection failures, logouts, expired KATO, etc. */ | 1357 | * connection failures, logouts, expired KATO, etc. */ |
1348 | 1358 | ||
1349 | list_for_each_entry_safe(ddb_entry, dtemp, | 1359 | qla4xxx_relogin_all_devices(ha); |
1350 | &ha->ddb_list, list) { | ||
1351 | if ((atomic_read(&ddb_entry->state) == | ||
1352 | DDB_STATE_MISSING) || | ||
1353 | (atomic_read(&ddb_entry->state) == | ||
1354 | DDB_STATE_DEAD)) { | ||
1355 | if (ddb_entry->fw_ddb_device_state == | ||
1356 | DDB_DS_SESSION_ACTIVE) { | ||
1357 | atomic_set(&ddb_entry->state, | ||
1358 | DDB_STATE_ONLINE); | ||
1359 | ql4_printk(KERN_INFO, ha, | ||
1360 | "scsi%ld: %s: ddb[%d]" | ||
1361 | " marked ONLINE\n", | ||
1362 | ha->host_no, __func__, | ||
1363 | ddb_entry->fw_ddb_index); | ||
1364 | |||
1365 | iscsi_unblock_session( | ||
1366 | ddb_entry->sess); | ||
1367 | } else | ||
1368 | qla4xxx_relogin_device( | ||
1369 | ha, ddb_entry); | ||
1370 | } | ||
1371 | |||
1372 | } | ||
1373 | } | 1360 | } |
1374 | } | 1361 | } |
1375 | 1362 | ||
@@ -1630,6 +1617,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1630 | uint8_t init_retry_count = 0; | 1617 | uint8_t init_retry_count = 0; |
1631 | char buf[34]; | 1618 | char buf[34]; |
1632 | struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; | 1619 | struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; |
1620 | uint32_t dev_state; | ||
1633 | 1621 | ||
1634 | if (pci_enable_device(pdev)) | 1622 | if (pci_enable_device(pdev)) |
1635 | return -1; | 1623 | return -1; |
@@ -1713,6 +1701,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1713 | status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); | 1701 | status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); |
1714 | while ((!test_bit(AF_ONLINE, &ha->flags)) && | 1702 | while ((!test_bit(AF_ONLINE, &ha->flags)) && |
1715 | init_retry_count++ < MAX_INIT_RETRIES) { | 1703 | init_retry_count++ < MAX_INIT_RETRIES) { |
1704 | |||
1705 | if (is_qla8022(ha)) { | ||
1706 | qla4_8xxx_idc_lock(ha); | ||
1707 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
1708 | qla4_8xxx_idc_unlock(ha); | ||
1709 | if (dev_state == QLA82XX_DEV_FAILED) { | ||
1710 | ql4_printk(KERN_WARNING, ha, "%s: don't retry " | ||
1711 | "initialize adapter. H/W is in failed state\n", | ||
1712 | __func__); | ||
1713 | break; | ||
1714 | } | ||
1715 | } | ||
1716 | DEBUG2(printk("scsi: %s: retrying adapter initialization " | 1716 | DEBUG2(printk("scsi: %s: retrying adapter initialization " |
1717 | "(%d)\n", __func__, init_retry_count)); | 1717 | "(%d)\n", __func__, init_retry_count)); |
1718 | 1718 | ||
@@ -1815,6 +1815,44 @@ probe_disable_device: | |||
1815 | } | 1815 | } |
1816 | 1816 | ||
1817 | /** | 1817 | /** |
1818 | * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize | ||
1819 | * @ha: pointer to adapter structure | ||
1820 | * | ||
1821 | * Mark the other ISP-4xxx port to indicate that the driver is being removed, | ||
1822 | * so that the other port will not re-initialize while in the process of | ||
1823 | * removing the ha due to driver unload or hba hotplug. | ||
1824 | **/ | ||
1825 | static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) | ||
1826 | { | ||
1827 | struct scsi_qla_host *other_ha = NULL; | ||
1828 | struct pci_dev *other_pdev = NULL; | ||
1829 | int fn = ISP4XXX_PCI_FN_2; | ||
1830 | |||
1831 | /*iscsi function numbers for ISP4xxx is 1 and 3*/ | ||
1832 | if (PCI_FUNC(ha->pdev->devfn) & BIT_1) | ||
1833 | fn = ISP4XXX_PCI_FN_1; | ||
1834 | |||
1835 | other_pdev = | ||
1836 | pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), | ||
1837 | ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), | ||
1838 | fn)); | ||
1839 | |||
1840 | /* Get other_ha if other_pdev is valid and state is enable*/ | ||
1841 | if (other_pdev) { | ||
1842 | if (atomic_read(&other_pdev->enable_cnt)) { | ||
1843 | other_ha = pci_get_drvdata(other_pdev); | ||
1844 | if (other_ha) { | ||
1845 | set_bit(AF_HA_REMOVAL, &other_ha->flags); | ||
1846 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " | ||
1847 | "Prevent %s reinit\n", __func__, | ||
1848 | dev_name(&other_ha->pdev->dev))); | ||
1849 | } | ||
1850 | } | ||
1851 | pci_dev_put(other_pdev); | ||
1852 | } | ||
1853 | } | ||
1854 | |||
1855 | /** | ||
1818 | * qla4xxx_remove_adapter - callback function to remove adapter. | 1856 | * qla4xxx_remove_adapter - callback function to remove adapter. |
1819 | * @pci_dev: PCI device pointer | 1857 | * @pci_dev: PCI device pointer |
1820 | **/ | 1858 | **/ |
@@ -1824,7 +1862,8 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) | |||
1824 | 1862 | ||
1825 | ha = pci_get_drvdata(pdev); | 1863 | ha = pci_get_drvdata(pdev); |
1826 | 1864 | ||
1827 | set_bit(AF_HBA_GOING_AWAY, &ha->flags); | 1865 | if (!is_qla8022(ha)) |
1866 | qla4xxx_prevent_other_port_reinit(ha); | ||
1828 | 1867 | ||
1829 | /* remove devs from iscsi_sessions to scsi_devices */ | 1868 | /* remove devs from iscsi_sessions to scsi_devices */ |
1830 | qla4xxx_free_ddb_list(ha); | 1869 | qla4xxx_free_ddb_list(ha); |
@@ -1868,10 +1907,15 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev) | |||
1868 | { | 1907 | { |
1869 | struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); | 1908 | struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); |
1870 | struct ddb_entry *ddb = sess->dd_data; | 1909 | struct ddb_entry *ddb = sess->dd_data; |
1910 | int queue_depth = QL4_DEF_QDEPTH; | ||
1871 | 1911 | ||
1872 | sdev->hostdata = ddb; | 1912 | sdev->hostdata = ddb; |
1873 | sdev->tagged_supported = 1; | 1913 | sdev->tagged_supported = 1; |
1874 | scsi_activate_tcq(sdev, QL4_DEF_QDEPTH); | 1914 | |
1915 | if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) | ||
1916 | queue_depth = ql4xmaxqdepth; | ||
1917 | |||
1918 | scsi_activate_tcq(sdev, queue_depth); | ||
1875 | return 0; | 1919 | return 0; |
1876 | } | 1920 | } |
1877 | 1921 | ||
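The ql4_os.c changes add the ql4xmaxqdepth and ql4xsess_recovery_tmo parameters, factor the link-up relogin loop into qla4xxx_relogin_all_devices(), and replace AF_HBA_GOING_AWAY with AF_HA_REMOVAL set on the sibling ISP4xxx function at remove time. The slave_alloc hunk clamps the requested queue depth before activating TCQ; a sketch of that selection, with hypothetical names:

#include <stdio.h>

#define DEF_QDEPTH 32

/* Use the module-parameter value only when it is positive and fits in
 * 16 bits (the hunk gets the same effect with an unsigned compare);
 * otherwise fall back to the built-in default. */
static int pick_queue_depth(int requested)
{
    int depth = DEF_QDEPTH;

    if (requested > 0 && requested <= 0xffff)
        depth = requested;
    return depth;
}

int main(void)
{
    printf("%d\n", pick_queue_depth(0));        /* 32  (default)        */
    printf("%d\n", pick_queue_depth(64));       /* 64  (valid override) */
    printf("%d\n", pick_queue_depth(0x20000));  /* 32  (out of range)   */
    return 0;
}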
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 8475b308e01b..603155769407 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
@@ -5,4 +5,4 @@ | |||
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k5" | 8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k6" |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b4218390941e..3fd16d7212de 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1917,7 +1917,7 @@ store_priv_session_##field(struct device *dev, \ | |||
1917 | #define iscsi_priv_session_rw_attr(field, format) \ | 1917 | #define iscsi_priv_session_rw_attr(field, format) \ |
1918 | iscsi_priv_session_attr_show(field, format) \ | 1918 | iscsi_priv_session_attr_show(field, format) \ |
1919 | iscsi_priv_session_attr_store(field) \ | 1919 | iscsi_priv_session_attr_store(field) \ |
1920 | static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ | 1920 | static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ |
1921 | show_priv_session_##field, \ | 1921 | show_priv_session_##field, \ |
1922 | store_priv_session_##field) | 1922 | store_priv_session_##field) |
1923 | iscsi_priv_session_rw_attr(recovery_tmo, "%d"); | 1923 | iscsi_priv_session_rw_attr(recovery_tmo, "%d"); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7ff61d76b4c5..b61ebec6bca7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2027,14 +2027,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2027 | int old_rcd = sdkp->RCD; | 2027 | int old_rcd = sdkp->RCD; |
2028 | int old_dpofua = sdkp->DPOFUA; | 2028 | int old_dpofua = sdkp->DPOFUA; |
2029 | 2029 | ||
2030 | if (sdp->skip_ms_page_8) { | 2030 | if (sdp->skip_ms_page_8) |
2031 | if (sdp->type == TYPE_RBC) | 2031 | goto defaults; |
2032 | goto defaults; | 2032 | |
2033 | else { | 2033 | if (sdp->type == TYPE_RBC) { |
2034 | modepage = 0x3F; | ||
2035 | dbd = 0; | ||
2036 | } | ||
2037 | } else if (sdp->type == TYPE_RBC) { | ||
2038 | modepage = 6; | 2034 | modepage = 6; |
2039 | dbd = 8; | 2035 | dbd = 8; |
2040 | } else { | 2036 | } else { |
@@ -2062,11 +2058,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2062 | */ | 2058 | */ |
2063 | if (len < 3) | 2059 | if (len < 3) |
2064 | goto bad_sense; | 2060 | goto bad_sense; |
2065 | else if (len > SD_BUF_SIZE) { | 2061 | if (len > 20) |
2066 | sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " | 2062 | len = 20; |
2067 | "data from %d to %d bytes\n", len, SD_BUF_SIZE); | 2063 | |
2068 | len = SD_BUF_SIZE; | 2064 | /* Take headers and block descriptors into account */ |
2069 | } | 2065 | len += data.header_length + data.block_descriptor_length; |
2066 | if (len > SD_BUF_SIZE) | ||
2067 | goto bad_sense; | ||
2070 | 2068 | ||
2071 | /* Get the data */ | 2069 | /* Get the data */ |
2072 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); | 2070 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); |
@@ -2074,45 +2072,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2074 | if (scsi_status_is_good(res)) { | 2072 | if (scsi_status_is_good(res)) { |
2075 | int offset = data.header_length + data.block_descriptor_length; | 2073 | int offset = data.header_length + data.block_descriptor_length; |
2076 | 2074 | ||
2077 | while (offset < len) { | 2075 | if (offset >= SD_BUF_SIZE - 2) { |
2078 | u8 page_code = buffer[offset] & 0x3F; | 2076 | sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); |
2079 | u8 spf = buffer[offset] & 0x40; | 2077 | goto defaults; |
2080 | |||
2081 | if (page_code == 8 || page_code == 6) { | ||
2082 | /* We're interested only in the first 3 bytes. | ||
2083 | */ | ||
2084 | if (len - offset <= 2) { | ||
2085 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
2086 | "mode parameter data\n"); | ||
2087 | goto defaults; | ||
2088 | } else { | ||
2089 | modepage = page_code; | ||
2090 | goto Page_found; | ||
2091 | } | ||
2092 | } else { | ||
2093 | /* Go to the next page */ | ||
2094 | if (spf && len - offset > 3) | ||
2095 | offset += 4 + (buffer[offset+2] << 8) + | ||
2096 | buffer[offset+3]; | ||
2097 | else if (!spf && len - offset > 1) | ||
2098 | offset += 2 + buffer[offset+1]; | ||
2099 | else { | ||
2100 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
2101 | "mode parameter data\n"); | ||
2102 | goto defaults; | ||
2103 | } | ||
2104 | } | ||
2105 | } | 2078 | } |
2106 | 2079 | ||
2107 | if (modepage == 0x3F) { | 2080 | if ((buffer[offset] & 0x3f) != modepage) { |
2108 | sd_printk(KERN_ERR, sdkp, "No Caching mode page " | ||
2109 | "present\n"); | ||
2110 | goto defaults; | ||
2111 | } else if ((buffer[offset] & 0x3f) != modepage) { | ||
2112 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); | 2081 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); |
2113 | goto defaults; | 2082 | goto defaults; |
2114 | } | 2083 | } |
2115 | Page_found: | 2084 | |
2116 | if (modepage == 8) { | 2085 | if (modepage == 8) { |
2117 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); | 2086 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); |
2118 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); | 2087 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); |
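The sd.c hunk reverts the page-0x3F scan: the MODE SENSE length is capped, header and block-descriptor lengths are added back in, and the code jumps directly to the expected page, verifying only its page code before reading the WCE and RCD bits. A rough userspace sketch of that parse (buffer layout and names are illustrative, not sd.c's structures):

#include <stdio.h>

#define CACHING_PAGE 0x08

struct cache_bits { int wce; int rcd; };

/* hdr_len/blk_len would come from the MODE SENSE data; buf is the raw data. */
static int parse_caching_page(const unsigned char *buf, int buf_len,
                              int hdr_len, int blk_len, struct cache_bits *out)
{
    int offset = hdr_len + blk_len;

    if (offset >= buf_len - 2)
        return -1;                      /* malformed response */
    if ((buf[offset] & 0x3f) != CACHING_PAGE)
        return -1;                      /* got the wrong page */

    out->wce = (buf[offset + 2] & 0x04) != 0;
    out->rcd = (buf[offset + 2] & 0x01) != 0;
    return 0;
}

int main(void)
{
    /* 4-byte header, no block descriptors, then page 0x08 with WCE set. */
    unsigned char buf[] = { 0, 0, 0, 0, 0x08, 0x12, 0x04, 0 };
    struct cache_bits cb;

    if (parse_caching_page(buf, (int)sizeof(buf), 4, 0, &cb) == 0)
        printf("WCE=%d RCD=%d\n", cb.wce, cb.rcd);
    return 0;
}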
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a86f820..eb7a3e85304f 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -35,9 +35,11 @@ | |||
35 | 35 | ||
36 | struct ses_device { | 36 | struct ses_device { |
37 | unsigned char *page1; | 37 | unsigned char *page1; |
38 | unsigned char *page1_types; | ||
38 | unsigned char *page2; | 39 | unsigned char *page2; |
39 | unsigned char *page10; | 40 | unsigned char *page10; |
40 | short page1_len; | 41 | short page1_len; |
42 | short page1_num_types; | ||
41 | short page2_len; | 43 | short page2_len; |
42 | short page10_len; | 44 | short page10_len; |
43 | }; | 45 | }; |
@@ -110,12 +112,12 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev, | |||
110 | int i, j, count = 0, descriptor = ecomp->number; | 112 | int i, j, count = 0, descriptor = ecomp->number; |
111 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); | 113 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); |
112 | struct ses_device *ses_dev = edev->scratch; | 114 | struct ses_device *ses_dev = edev->scratch; |
113 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 115 | unsigned char *type_ptr = ses_dev->page1_types; |
114 | unsigned char *desc_ptr = ses_dev->page2 + 8; | 116 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
115 | 117 | ||
116 | /* Clear everything */ | 118 | /* Clear everything */ |
117 | memset(desc_ptr, 0, ses_dev->page2_len - 8); | 119 | memset(desc_ptr, 0, ses_dev->page2_len - 8); |
118 | for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { | 120 | for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { |
119 | for (j = 0; j < type_ptr[1]; j++) { | 121 | for (j = 0; j < type_ptr[1]; j++) { |
120 | desc_ptr += 4; | 122 | desc_ptr += 4; |
121 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 123 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && |
@@ -140,12 +142,12 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, | |||
140 | int i, j, count = 0, descriptor = ecomp->number; | 142 | int i, j, count = 0, descriptor = ecomp->number; |
141 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); | 143 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); |
142 | struct ses_device *ses_dev = edev->scratch; | 144 | struct ses_device *ses_dev = edev->scratch; |
143 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 145 | unsigned char *type_ptr = ses_dev->page1_types; |
144 | unsigned char *desc_ptr = ses_dev->page2 + 8; | 146 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
145 | 147 | ||
146 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 148 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
147 | 149 | ||
148 | for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { | 150 | for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { |
149 | for (j = 0; j < type_ptr[1]; j++) { | 151 | for (j = 0; j < type_ptr[1]; j++) { |
150 | desc_ptr += 4; | 152 | desc_ptr += 4; |
151 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 153 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && |
@@ -358,7 +360,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
358 | unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; | 360 | unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; |
359 | int i, j, page7_len, len, components; | 361 | int i, j, page7_len, len, components; |
360 | struct ses_device *ses_dev = edev->scratch; | 362 | struct ses_device *ses_dev = edev->scratch; |
361 | int types = ses_dev->page1[10]; | 363 | int types = ses_dev->page1_num_types; |
362 | unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); | 364 | unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); |
363 | 365 | ||
364 | if (!hdr_buf) | 366 | if (!hdr_buf) |
@@ -390,10 +392,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
390 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 392 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
391 | /* skip past overall descriptor */ | 393 | /* skip past overall descriptor */ |
392 | desc_ptr += len + 4; | 394 | desc_ptr += len + 4; |
393 | if (ses_dev->page10) | ||
394 | addl_desc_ptr = ses_dev->page10 + 8; | ||
395 | } | 395 | } |
396 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 396 | if (ses_dev->page10) |
397 | addl_desc_ptr = ses_dev->page10 + 8; | ||
398 | type_ptr = ses_dev->page1_types; | ||
397 | components = 0; | 399 | components = 0; |
398 | for (i = 0; i < types; i++, type_ptr += 4) { | 400 | for (i = 0; i < types; i++, type_ptr += 4) { |
399 | for (j = 0; j < type_ptr[1]; j++) { | 401 | for (j = 0; j < type_ptr[1]; j++) { |
@@ -503,6 +505,7 @@ static int ses_intf_add(struct device *cdev, | |||
503 | u32 result; | 505 | u32 result; |
504 | int i, types, len, components = 0; | 506 | int i, types, len, components = 0; |
505 | int err = -ENOMEM; | 507 | int err = -ENOMEM; |
508 | int num_enclosures; | ||
506 | struct enclosure_device *edev; | 509 | struct enclosure_device *edev; |
507 | struct ses_component *scomp = NULL; | 510 | struct ses_component *scomp = NULL; |
508 | 511 | ||
@@ -530,16 +533,6 @@ static int ses_intf_add(struct device *cdev, | |||
530 | if (result) | 533 | if (result) |
531 | goto recv_failed; | 534 | goto recv_failed; |
532 | 535 | ||
533 | if (hdr_buf[1] != 0) { | ||
534 | /* FIXME: need subenclosure support; I've just never | ||
535 | * seen a device with subenclosures and it makes the | ||
536 | * traversal routines more complex */ | ||
537 | sdev_printk(KERN_ERR, sdev, | ||
538 | "FIXME driver has no support for subenclosures (%d)\n", | ||
539 | hdr_buf[1]); | ||
540 | goto err_free; | ||
541 | } | ||
542 | |||
543 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 536 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; |
544 | buf = kzalloc(len, GFP_KERNEL); | 537 | buf = kzalloc(len, GFP_KERNEL); |
545 | if (!buf) | 538 | if (!buf) |
@@ -549,11 +542,24 @@ static int ses_intf_add(struct device *cdev, | |||
549 | if (result) | 542 | if (result) |
550 | goto recv_failed; | 543 | goto recv_failed; |
551 | 544 | ||
552 | types = buf[10]; | 545 | types = 0; |
553 | 546 | ||
554 | type_ptr = buf + 12 + buf[11]; | 547 | /* we always have one main enclosure and the rest are referred |
548 | * to as secondary subenclosures */ | ||
549 | num_enclosures = buf[1] + 1; | ||
555 | 550 | ||
556 | for (i = 0; i < types; i++, type_ptr += 4) { | 551 | /* begin at the enclosure descriptor */ |
552 | type_ptr = buf + 8; | ||
553 | /* skip all the enclosure descriptors */ | ||
554 | for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { | ||
555 | types += type_ptr[2]; | ||
556 | type_ptr += type_ptr[3] + 4; | ||
557 | } | ||
558 | |||
559 | ses_dev->page1_types = type_ptr; | ||
560 | ses_dev->page1_num_types = types; | ||
561 | |||
562 | for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { | ||
557 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || | 563 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
558 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 564 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) |
559 | components += type_ptr[1]; | 565 | components += type_ptr[1]; |
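The ses.c changes add subenclosure support: instead of assuming a single enclosure descriptor, the configuration page is walked across buf[1] + 1 enclosure descriptors, summing the type descriptor header counts and recording where the header list begins (page1_types/page1_num_types). A sketch of that traversal under the same layout assumptions (constants and names are illustrative):

#include <stdio.h>

static const unsigned char *
skip_enclosure_descriptors(const unsigned char *buf, int len, int *num_types)
{
    const unsigned char *p = buf + 8;   /* first enclosure descriptor */
    int num_enclosures = buf[1] + 1;    /* primary + secondary subenclosures */
    int types = 0, i;

    for (i = 0; i < num_enclosures && p < buf + len; i++) {
        types += p[2];                  /* type descriptor headers it owns */
        p += p[3] + 4;                  /* descriptor length + 4-byte header */
    }
    *num_types = types;
    return p;                           /* start of type descriptor headers */
}

int main(void)
{
    /* One secondary subenclosure; each descriptor: 4-byte header + 36 bytes. */
    unsigned char page1[8 + 2 * 40 + 8] = { 0x01, 1 };
    int types;

    page1[8 + 2]  = 3;  page1[8 + 3]  = 36;   /* primary: 3 types   */
    page1[48 + 2] = 2;  page1[48 + 3] = 36;   /* secondary: 2 types */

    skip_enclosure_descriptors(page1, (int)sizeof(page1), &types);
    printf("total type descriptor headers: %d\n", types);   /* 5 */
    return 0;
}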