aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/scsi/lpfc/lpfc.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c600
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c98
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c241
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c406
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c241
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h20
13 files changed, 1622 insertions, 75 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4de95559c6b7..bbc5be6ff5b7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -722,6 +722,20 @@ struct lpfc_hba {
722 uint32_t cfg_hba_queue_depth; 722 uint32_t cfg_hba_queue_depth;
723 uint32_t cfg_enable_hba_reset; 723 uint32_t cfg_enable_hba_reset;
724 uint32_t cfg_enable_hba_heartbeat; 724 uint32_t cfg_enable_hba_heartbeat;
725 uint32_t cfg_fof;
726 uint32_t cfg_EnableXLane;
727 uint8_t cfg_oas_tgt_wwpn[8];
728 uint8_t cfg_oas_vpt_wwpn[8];
729 uint32_t cfg_oas_lun_state;
730#define OAS_LUN_ENABLE 1
731#define OAS_LUN_DISABLE 0
732 uint32_t cfg_oas_lun_status;
733#define OAS_LUN_STATUS_EXISTS 0x01
734 uint32_t cfg_oas_flags;
735#define OAS_FIND_ANY_VPORT 0x01
736#define OAS_FIND_ANY_TARGET 0x02
737#define OAS_LUN_VALID 0x04
738 uint32_t cfg_XLanePriority;
725 uint32_t cfg_enable_bg; 739 uint32_t cfg_enable_bg;
726 uint32_t cfg_hostmem_hgp; 740 uint32_t cfg_hostmem_hgp;
727 uint32_t cfg_log_verbose; 741 uint32_t cfg_log_verbose;
@@ -973,6 +987,9 @@ struct lpfc_hba {
973 atomic_t sdev_cnt; 987 atomic_t sdev_cnt;
974 uint8_t fips_spec_rev; 988 uint8_t fips_spec_rev;
975 uint8_t fips_level; 989 uint8_t fips_level;
990 spinlock_t devicelock; /* lock for luns list */
991 mempool_t *device_data_mem_pool;
992 struct list_head luns;
976}; 993};
977 994
978static inline struct Scsi_Host * 995static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc92b93..ba8b77aa554d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
529} 529}
530 530
531/** 531/**
532 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
533 * (OAS) is supported.
534 * @dev: class unused variable.
535 * @attr: device attribute, not used.
536 * @buf: on return contains the module description text.
537 *
538 * Returns: size of formatted string.
539 **/
540static ssize_t
541lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
542 char *buf)
543{
544 struct Scsi_Host *shost = class_to_shost(dev);
545 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
546 struct lpfc_hba *phba = vport->phba;
547
548 return snprintf(buf, PAGE_SIZE, "%d\n",
549 phba->sli4_hba.pc_sli4_params.oas_supported);
550}
551
552/**
532 * lpfc_link_state_store - Transition the link_state on an HBA port 553 * lpfc_link_state_store - Transition the link_state on an HBA port
533 * @dev: class device that is converted into a Scsi_host. 554 * @dev: class device that is converted into a Scsi_host.
534 * @attr: device attribute, not used. 555 * @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
2041static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, 2062static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
2042 lpfc_sriov_hw_max_virtfn_show, NULL); 2063 lpfc_sriov_hw_max_virtfn_show, NULL);
2043static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); 2064static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2065static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2066 NULL);
2044 2067
2045static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 2068static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2069#define WWN_SZ 8
2070/**
2071 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2072 * @buf: WWN string.
2073 * @cnt: Length of string.
2074 * @wwn: Array to receive converted wwn value.
2075 *
2076 * Returns:
2077 * -EINVAL if the buffer does not contain a valid wwn
2078 * 0 success
2079 **/
2080static size_t
2081lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2082{
2083 unsigned int i, j;
2084
2085 /* Count may include a LF at end of string */
2086 if (buf[cnt-1] == '\n')
2087 cnt--;
2046 2088
2089 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2090 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2091 return -EINVAL;
2092
2093 memset(wwn, 0, WWN_SZ);
2094
2095 /* Validate and store the new name */
2096 for (i = 0, j = 0; i < 16; i++) {
2097 if ((*buf >= 'a') && (*buf <= 'f'))
2098 j = ((j << 4) | ((*buf++ - 'a') + 10));
2099 else if ((*buf >= 'A') && (*buf <= 'F'))
2100 j = ((j << 4) | ((*buf++ - 'A') + 10));
2101 else if ((*buf >= '0') && (*buf <= '9'))
2102 j = ((j << 4) | (*buf++ - '0'));
2103 else
2104 return -EINVAL;
2105 if (i % 2) {
2106 wwn[i/2] = j & 0xff;
2107 j = 0;
2108 }
2109 }
2110 return 0;
2111}
2047/** 2112/**
2048 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid 2113 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2049 * @dev: class device that is converted into a Scsi_host. 2114 * @dev: class device that is converted into a Scsi_host.
@@ -2132,9 +2197,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2132 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2197 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2133 struct lpfc_hba *phba = vport->phba; 2198 struct lpfc_hba *phba = vport->phba;
2134 struct completion online_compl; 2199 struct completion online_compl;
2135 int stat1=0, stat2=0; 2200 int stat1 = 0, stat2 = 0;
2136 unsigned int i, j, cnt=count; 2201 unsigned int cnt = count;
2137 u8 wwpn[8]; 2202 u8 wwpn[WWN_SZ];
2138 int rc; 2203 int rc;
2139 2204
2140 if (!phba->cfg_enable_hba_reset) 2205 if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2149 if (buf[cnt-1] == '\n') 2214 if (buf[cnt-1] == '\n')
2150 cnt--; 2215 cnt--;
2151 2216
2152 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || 2217 if (!phba->soft_wwn_enable)
2153 ((cnt == 17) && (*buf++ != 'x')) ||
2154 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2155 return -EINVAL; 2218 return -EINVAL;
2156 2219
2220 /* lock setting wwpn, wwnn down */
2157 phba->soft_wwn_enable = 0; 2221 phba->soft_wwn_enable = 0;
2158 2222
2159 memset(wwpn, 0, sizeof(wwpn)); 2223 rc = lpfc_wwn_set(buf, cnt, wwpn);
2160 2224 if (!rc) {
2161 /* Validate and store the new name */ 2225 /* not able to set wwpn, unlock it */
2162 for (i=0, j=0; i < 16; i++) { 2226 phba->soft_wwn_enable = 1;
2163 int value; 2227 return rc;
2164
2165 value = hex_to_bin(*buf++);
2166 if (value >= 0)
2167 j = (j << 4) | value;
2168 else
2169 return -EINVAL;
2170 if (i % 2) {
2171 wwpn[i/2] = j & 0xff;
2172 j = 0;
2173 }
2174 } 2228 }
2229
2175 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 2230 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2176 fc_host_port_name(shost) = phba->cfg_soft_wwpn; 2231 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2177 if (phba->cfg_soft_wwnn) 2232 if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2198 "reinit adapter - %d\n", stat2); 2253 "reinit adapter - %d\n", stat2);
2199 return (stat1 || stat2) ? -EIO : count; 2254 return (stat1 || stat2) ? -EIO : count;
2200} 2255}
2201static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ 2256static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
2202 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); 2257 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
2203 2258
2204/** 2259/**
@@ -2235,39 +2290,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2235{ 2290{
2236 struct Scsi_Host *shost = class_to_shost(dev); 2291 struct Scsi_Host *shost = class_to_shost(dev);
2237 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2292 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2238 unsigned int i, j, cnt=count; 2293 unsigned int cnt = count;
2239 u8 wwnn[8]; 2294 u8 wwnn[WWN_SZ];
2295 int rc;
2240 2296
2241 /* count may include a LF at end of string */ 2297 /* count may include a LF at end of string */
2242 if (buf[cnt-1] == '\n') 2298 if (buf[cnt-1] == '\n')
2243 cnt--; 2299 cnt--;
2244 2300
2245 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || 2301 if (!phba->soft_wwn_enable)
2246 ((cnt == 17) && (*buf++ != 'x')) ||
2247 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2248 return -EINVAL; 2302 return -EINVAL;
2249 2303
2250 /* 2304 rc = lpfc_wwn_set(buf, cnt, wwnn);
2251 * Allow wwnn to be set many times, as long as the enable is set. 2305 if (!rc) {
2252 * However, once the wwpn is set, everything locks. 2306 /* Allow wwnn to be set many times, as long as the enable
2253 */ 2307 * is set. However, once the wwpn is set, everything locks.
2254 2308 */
2255 memset(wwnn, 0, sizeof(wwnn)); 2309 return rc;
2256
2257 /* Validate and store the new name */
2258 for (i=0, j=0; i < 16; i++) {
2259 int value;
2260
2261 value = hex_to_bin(*buf++);
2262 if (value >= 0)
2263 j = (j << 4) | value;
2264 else
2265 return -EINVAL;
2266 if (i % 2) {
2267 wwnn[i/2] = j & 0xff;
2268 j = 0;
2269 }
2270 } 2310 }
2311
2271 phba->cfg_soft_wwnn = wwn_to_u64(wwnn); 2312 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2272 2313
2273 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 2314 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2276 2317
2277 return count; 2318 return count;
2278} 2319}
2279static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ 2320static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
2280 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); 2321 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
2281 2322
2323/**
2324 * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
2325 * Optimized Access Storage (OAS) operations.
2326 * @dev: class device that is converted into a Scsi_host.
2327 * @attr: device attribute, not used.
2328 * @buf: buffer for passing information.
2329 *
2330 * Returns:
2331 * value of count
2332 **/
2333static ssize_t
2334lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2335 char *buf)
2336{
2337 struct Scsi_Host *shost = class_to_shost(dev);
2338 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2339
2340 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
2341 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2342}
2343
2344/**
2345 * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for
2346 * Optimized Access Storage (OAS) operations.
2347 * @dev: class device that is converted into a Scsi_host.
2348 * @attr: device attribute, not used.
2349 * @buf: buffer for passing information.
2350 * @count: Size of the data buffer.
2351 *
2352 * Returns:
2353 * -EINVAL count is invalid, invalid wwpn byte invalid
2354 * -EPERM oas is not supported by hba
2355 * value of count on success
2356 **/
2357static ssize_t
2358lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2359 const char *buf, size_t count)
2360{
2361 struct Scsi_Host *shost = class_to_shost(dev);
2362 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2363 unsigned int cnt = count;
2364 uint8_t wwpn[WWN_SZ];
2365 int rc;
2366
2367 if (!phba->cfg_EnableXLane)
2368 return -EPERM;
2369
2370 /* count may include a LF at end of string */
2371 if (buf[cnt-1] == '\n')
2372 cnt--;
2373
2374 rc = lpfc_wwn_set(buf, cnt, wwpn);
2375 if (rc)
2376 return rc;
2377
2378 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2379 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2380 if (wwn_to_u64(wwpn) == 0)
2381 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2382 else
2383 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2384 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2385 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2386 return count;
2387}
2388static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
2389 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
2390
2391/**
2392 * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled
2393 * for Optimized Access Storage (OAS) operations.
2394 * @dev: class device that is converted into a Scsi_host.
2395 * @attr: device attribute, not used.
2396 * @buf: buffer for passing information.
2397 *
2398 * Returns:
2399 * value of count on success
2400 **/
2401static ssize_t
2402lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
2403 char *buf)
2404{
2405 struct Scsi_Host *shost = class_to_shost(dev);
2406 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2407
2408 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
2409 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
2410}
2411
2412/**
2413 * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled
2414 * for Optimized Access Storage (OAS) operations.
2415 * @dev: class device that is converted into a Scsi_host.
2416 * @attr: device attribute, not used.
2417 * @buf: buffer for passing information.
2418 * @count: Size of the data buffer.
2419 *
2420 * Returns:
2421 * -EINVAL count is invalid, invalid wwpn byte invalid
2422 * -EPERM oas is not supported by hba
2423 * value of count on success
2424 **/
2425static ssize_t
2426lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
2427 const char *buf, size_t count)
2428{
2429 struct Scsi_Host *shost = class_to_shost(dev);
2430 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2431 unsigned int cnt = count;
2432 uint8_t wwpn[WWN_SZ];
2433 int rc;
2434
2435 if (!phba->cfg_EnableXLane)
2436 return -EPERM;
2437
2438 /* count may include a LF at end of string */
2439 if (buf[cnt-1] == '\n')
2440 cnt--;
2441
2442 rc = lpfc_wwn_set(buf, cnt, wwpn);
2443 if (rc)
2444 return rc;
2445
2446 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2447 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2448 if (wwn_to_u64(wwpn) == 0)
2449 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
2450 else
2451 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
2452 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2453 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2454 return count;
2455}
2456static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
2457 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
2458
2459/**
2460 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
2461 * of whether luns will be enabled or disabled
2462 * for Optimized Access Storage (OAS) operations.
2463 * @dev: class device that is converted into a Scsi_host.
2464 * @attr: device attribute, not used.
2465 * @buf: buffer for passing information.
2466 *
2467 * Returns:
2468 * size of formatted string.
2469 **/
2470static ssize_t
2471lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
2472 char *buf)
2473{
2474 struct Scsi_Host *shost = class_to_shost(dev);
2475 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2476
2477 return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
2478}
2479
2480/**
2481 * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
2482 * of whether luns will be enabled or disabled
2483 * for Optimized Access Storage (OAS) operations.
2484 * @dev: class device that is converted into a Scsi_host.
2485 * @attr: device attribute, not used.
2486 * @buf: buffer for passing information.
2487 * @count: Size of the data buffer.
2488 *
2489 * Returns:
2490 * -EINVAL count is invalid, invalid wwpn byte invalid
2491 * -EPERM oas is not supported by hba
2492 * value of count on success
2493 **/
2494static ssize_t
2495lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
2496 const char *buf, size_t count)
2497{
2498 struct Scsi_Host *shost = class_to_shost(dev);
2499 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2500 int val = 0;
2501
2502 if (!phba->cfg_EnableXLane)
2503 return -EPERM;
2504
2505 if (!isdigit(buf[0]))
2506 return -EINVAL;
2507
2508 if (sscanf(buf, "%i", &val) != 1)
2509 return -EINVAL;
2510
2511 if ((val != 0) && (val != 1))
2512 return -EINVAL;
2513
2514 phba->cfg_oas_lun_state = val;
2515
2516 return strlen(buf);
2517}
2518static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
2519 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
2520
2521/**
2522 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
2523 * Storage (OAS) lun returned by the
2524 * lpfc_oas_lun_show function.
2525 * @dev: class device that is converted into a Scsi_host.
2526 * @attr: device attribute, not used.
2527 * @buf: buffer for passing information.
2528 *
2529 * Returns:
2530 * size of formatted string.
2531 **/
2532static ssize_t
2533lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
2534 char *buf)
2535{
2536 struct Scsi_Host *shost = class_to_shost(dev);
2537 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2538
2539 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
2540 return -EFAULT;
2541
2542 return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
2543}
2544static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
2545 lpfc_oas_lun_status_show, NULL);
2546
2547
2548/**
2549 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
2550 * (OAS) operations.
2551 * @phba: lpfc_hba pointer.
2552 * @ndlp: pointer to fcp target node.
2553 * @lun: the fc lun for setting oas state.
2554 * @oas_state: the oas state to be set to the lun.
2555 *
2556 * Returns:
2557 * SUCCESS : 0
2558 * -EPERM OAS is not enabled or not supported by this port.
2559 *
2560 */
2561static size_t
2562lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2563 uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
2564{
2565
2566 int rc = 0;
2567
2568 if (!phba->cfg_EnableXLane)
2569 return -EPERM;
2570
2571 if (oas_state) {
2572 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
2573 (struct lpfc_name *)tgt_wwpn, lun))
2574 rc = -ENOMEM;
2575 } else {
2576 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
2577 (struct lpfc_name *)tgt_wwpn, lun);
2578 }
2579 return rc;
2580
2581}
2582
2583/**
2584 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
2585 * Access Storage (OAS) operations.
2586 * @phba: lpfc_hba pointer.
2587 * @vpt_wwpn: wwpn of the vport associated with the returned lun
2588 * @tgt_wwpn: wwpn of the target associated with the returned lun
2589 * @lun_status: status of the lun returned lun
2590 *
2591 * Returns the first or next lun enabled for OAS operations for the vport/target
2592 * specified. If a lun is found, its vport wwpn, target wwpn and status is
2593 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
2594 *
2595 * Return:
2596 * lun that is OAS enabled for the vport/target
2597 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
2598 */
2599static uint64_t
2600lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2601 uint8_t tgt_wwpn[], uint32_t *lun_status)
2602{
2603 uint64_t found_lun;
2604
2605 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
2606 return NOT_OAS_ENABLED_LUN;
2607 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
2608 phba->sli4_hba.oas_next_vpt_wwpn,
2609 (struct lpfc_name *)
2610 phba->sli4_hba.oas_next_tgt_wwpn,
2611 &phba->sli4_hba.oas_next_lun,
2612 (struct lpfc_name *)vpt_wwpn,
2613 (struct lpfc_name *)tgt_wwpn,
2614 &found_lun, lun_status))
2615 return found_lun;
2616 else
2617 return NOT_OAS_ENABLED_LUN;
2618}
2619
2620/**
2621 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
2622 * @phba: lpfc_hba pointer.
2623 * @vpt_wwpn: vport wwpn by reference.
2624 * @tgt_wwpn: target wwpn by reference.
2625 * @lun: the fc lun for setting oas state.
2626 * @oas_state: the oas state to be set to the oas_lun.
2627 *
2628 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
2629 * a lun for OAS operations.
2630 *
2631 * Return:
2632 * SUCCESS: 0
2633 * -ENOMEM: failed to enable an lun for OAS operations
2634 * -EPERM: OAS is not enabled
2635 */
2636static ssize_t
2637lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2638 uint8_t tgt_wwpn[], uint64_t lun,
2639 uint32_t oas_state)
2640{
2641
2642 int rc;
2643
2644 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
2645 oas_state);
2646 return rc;
2647}
2648
2649/**
2650 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
2651 * @dev: class device that is converted into a Scsi_host.
2652 * @attr: device attribute, not used.
2653 * @buf: buffer for passing information.
2654 *
2655 * This routine returns a lun enabled for OAS each time the function
2656 * is called.
2657 *
2658 * Returns:
2659 * SUCCESS: size of formatted string.
2660 * -EFAULT: target or vport wwpn was not set properly.
2661 * -EPERM: oas is not enabled.
2662 **/
2663static ssize_t
2664lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
2665 char *buf)
2666{
2667 struct Scsi_Host *shost = class_to_shost(dev);
2668 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2669
2670 uint64_t oas_lun;
2671 int len = 0;
2672
2673 if (!phba->cfg_EnableXLane)
2674 return -EPERM;
2675
2676 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
2677 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
2678 return -EFAULT;
2679
2680 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
2681 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
2682 return -EFAULT;
2683
2684 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
2685 phba->cfg_oas_tgt_wwpn,
2686 &phba->cfg_oas_lun_status);
2687 if (oas_lun != NOT_OAS_ENABLED_LUN)
2688 phba->cfg_oas_flags |= OAS_LUN_VALID;
2689
2690 len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
2691
2692 return len;
2693}
2694
2695/**
2696 * lpfc_oas_lun_store - Sets the OAS state for lun
2697 * @dev: class device that is converted into a Scsi_host.
2698 * @attr: device attribute, not used.
2699 * @buf: buffer for passing information.
2700 *
2701 * This function sets the OAS state for lun. Before this function is called,
2702 * the vport wwpn, target wwpn, and oas state need to be set.
2703 *
2704 * Returns:
2705 * SUCCESS: size of formatted string.
2706 * -EFAULT: target or vport wwpn was not set properly.
2707 * -EPERM: oas is not enabled.
2708 * size of formatted string.
2709 **/
2710static ssize_t
2711lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
2712 const char *buf, size_t count)
2713{
2714 struct Scsi_Host *shost = class_to_shost(dev);
2715 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2716 uint64_t scsi_lun;
2717 ssize_t rc;
2718
2719 if (!phba->cfg_EnableXLane)
2720 return -EPERM;
2721
2722 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
2723 return -EFAULT;
2724
2725 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
2726 return -EFAULT;
2727
2728 if (!isdigit(buf[0]))
2729 return -EINVAL;
2730
2731 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
2732 return -EINVAL;
2733
2734 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2735 "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
2736 "with oas set to %d\n",
2737 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
2738 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
2739 phba->cfg_oas_lun_state);
2740
2741 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
2742 phba->cfg_oas_tgt_wwpn, scsi_lun,
2743 phba->cfg_oas_lun_state);
2744
2745 if (rc)
2746 return rc;
2747
2748 return count;
2749}
2750static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
2751 lpfc_oas_lun_show, lpfc_oas_lun_store);
2282 2752
2283static int lpfc_poll = 0; 2753static int lpfc_poll = 0;
2284module_param(lpfc_poll, int, S_IRUGO); 2754module_param(lpfc_poll, int, S_IRUGO);
@@ -4157,6 +4627,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
4157LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); 4627LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
4158 4628
4159/* 4629/*
4630# lpfc_EnableXLane: Enable Express Lane Feature
4631# 0x0 Express Lane Feature disabled
4632# 0x1 Express Lane Feature enabled
4633# Value range is [0,1]. Default value is 0.
4634*/
4635LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
4636
4637/*
4638# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
4639# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
4640# Value range is [0x0,0x7f]. Default value is 0
4641*/
4642LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
4643
4644/*
4160# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 4645# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
4161# 0 = BlockGuard disabled (default) 4646# 0 = BlockGuard disabled (default)
4162# 1 = BlockGuard enabled 4647# 1 = BlockGuard enabled
@@ -4317,6 +4802,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
4317 &dev_attr_lpfc_soft_wwn_enable, 4802 &dev_attr_lpfc_soft_wwn_enable,
4318 &dev_attr_lpfc_enable_hba_reset, 4803 &dev_attr_lpfc_enable_hba_reset,
4319 &dev_attr_lpfc_enable_hba_heartbeat, 4804 &dev_attr_lpfc_enable_hba_heartbeat,
4805 &dev_attr_lpfc_EnableXLane,
4806 &dev_attr_lpfc_XLanePriority,
4807 &dev_attr_lpfc_xlane_lun,
4808 &dev_attr_lpfc_xlane_tgt,
4809 &dev_attr_lpfc_xlane_vpt,
4810 &dev_attr_lpfc_xlane_lun_state,
4811 &dev_attr_lpfc_xlane_lun_status,
4320 &dev_attr_lpfc_sg_seg_cnt, 4812 &dev_attr_lpfc_sg_seg_cnt,
4321 &dev_attr_lpfc_max_scsicmpl_time, 4813 &dev_attr_lpfc_max_scsicmpl_time,
4322 &dev_attr_lpfc_stat_data_ctrl, 4814 &dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4827,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4335 &dev_attr_lpfc_dss, 4827 &dev_attr_lpfc_dss,
4336 &dev_attr_lpfc_sriov_hw_max_virtfn, 4828 &dev_attr_lpfc_sriov_hw_max_virtfn,
4337 &dev_attr_protocol, 4829 &dev_attr_protocol,
4830 &dev_attr_lpfc_xlane_supported,
4338 NULL, 4831 NULL,
4339}; 4832};
4340 4833
@@ -5296,11 +5789,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
5296 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); 5789 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
5297 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 5790 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
5298 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 5791 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
5792 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
5793 if (phba->sli_rev != LPFC_SLI_REV4)
5794 phba->cfg_EnableXLane = 0;
5795 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
5796 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
5797 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
5798 phba->cfg_oas_lun_state = 0;
5799 phba->cfg_oas_lun_status = 0;
5800 phba->cfg_oas_flags = 0;
5299 lpfc_enable_bg_init(phba, lpfc_enable_bg); 5801 lpfc_enable_bg_init(phba, lpfc_enable_bg);
5300 if (phba->sli_rev == LPFC_SLI_REV4) 5802 if (phba->sli_rev == LPFC_SLI_REV4)
5301 phba->cfg_poll = 0; 5803 phba->cfg_poll = 0;
5302 else 5804 else
5303 phba->cfg_poll = lpfc_poll; 5805 phba->cfg_poll = lpfc_poll;
5304 phba->cfg_soft_wwnn = 0L; 5806 phba->cfg_soft_wwnn = 0L;
5305 phba->cfg_soft_wwpn = 0L; 5807 phba->cfg_soft_wwpn = 0L;
5306 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 5808 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 090945167d7c..4d5ee77bc98c 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);
187void lpfc_offline(struct lpfc_hba *); 187void lpfc_offline(struct lpfc_hba *);
188void lpfc_reset_hba(struct lpfc_hba *); 188void lpfc_reset_hba(struct lpfc_hba *);
189 189
190int lpfc_fof_queue_create(struct lpfc_hba *);
191int lpfc_fof_queue_setup(struct lpfc_hba *);
192int lpfc_fof_queue_destroy(struct lpfc_hba *);
193irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
194
190int lpfc_sli_setup(struct lpfc_hba *); 195int lpfc_sli_setup(struct lpfc_hba *);
191int lpfc_sli_queue_setup(struct lpfc_hba *); 196int lpfc_sli_queue_setup(struct lpfc_hba *);
192 197
@@ -472,3 +477,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
472uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); 477uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
473int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); 478int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
474void lpfc_sli4_offline_eratt(struct lpfc_hba *); 479void lpfc_sli4_offline_eratt(struct lpfc_hba *);
480
481struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
482 struct lpfc_name *,
483 struct lpfc_name *,
484 uint64_t, bool);
485void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
486struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
487 struct list_head *list,
488 struct lpfc_name *,
489 struct lpfc_name *, uint64_t);
490bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
491 struct lpfc_name *, uint64_t);
492bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
493 struct lpfc_name *, uint64_t);
494bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
495 struct lpfc_name *, uint64_t *, struct lpfc_name *,
496 struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c3c3fbe43083..828c08e9389e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@ proc_cq:
2280 } 2280 }
2281 } 2281 }
2282 2282
2283 if (phba->cfg_fof) {
2284 /* FOF EQ */
2285 qp = phba->sli4_hba.fof_eq;
2286 if (!qp)
2287 goto out;
2288
2289 len += snprintf(pbuffer+len,
2290 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2291 "\nFOF EQ info: "
2292 "EQ-STAT[max:x%x noE:x%x "
2293 "bs:x%x proc:x%llx]\n",
2294 qp->q_cnt_1, qp->q_cnt_2,
2295 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
2296
2297 len += snprintf(pbuffer+len,
2298 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2299 "EQID[%02d], "
2300 "QE-CNT[%04d], QE-SIZE[%04d], "
2301 "HOST-IDX[%04d], PORT-IDX[%04d]",
2302 qp->queue_id,
2303 qp->entry_count,
2304 qp->entry_size,
2305 qp->host_index,
2306 qp->hba_index);
2307
2308 /* Reset max counter */
2309 qp->EQ_max_eqe = 0;
2310
2311 len += snprintf(pbuffer+len,
2312 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2313 if (len >= max_cnt)
2314 goto too_big;
2315 }
2316
2317 if (phba->cfg_EnableXLane) {
2318
2319 /* OAS CQ */
2320 qp = phba->sli4_hba.oas_cq;
2321 if (qp) {
2322 len += snprintf(pbuffer+len,
2323 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2324 "\tOAS CQ info: ");
2325 len += snprintf(pbuffer+len,
2326 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2327 "AssocEQID[%02d]: "
2328 "CQ STAT[max:x%x relw:x%x "
2329 "xabt:x%x wq:x%llx]\n",
2330 qp->assoc_qid,
2331 qp->q_cnt_1, qp->q_cnt_2,
2332 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
2333 len += snprintf(pbuffer+len,
2334 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2335 "\tCQID[%02d], "
2336 "QE-CNT[%04d], QE-SIZE[%04d], "
2337 "HOST-IDX[%04d], PORT-IDX[%04d]",
2338 qp->queue_id, qp->entry_count,
2339 qp->entry_size, qp->host_index,
2340 qp->hba_index);
2341
2342 /* Reset max counter */
2343 qp->CQ_max_cqe = 0;
2344
2345 len += snprintf(pbuffer+len,
2346 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2347 if (len >= max_cnt)
2348 goto too_big;
2349 }
2350
2351 /* OAS WQ */
2352 qp = phba->sli4_hba.oas_wq;
2353 if (qp) {
2354 len += snprintf(pbuffer+len,
2355 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2356 "\t\tOAS WQ info: ");
2357 len += snprintf(pbuffer+len,
2358 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2359 "AssocCQID[%02d]: "
2360 "WQ-STAT[oflow:x%x posted:x%llx]\n",
2361 qp->assoc_qid,
2362 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
2363 len += snprintf(pbuffer+len,
2364 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2365 "\t\tWQID[%02d], "
2366 "QE-CNT[%04d], QE-SIZE[%04d], "
2367 "HOST-IDX[%04d], PORT-IDX[%04d]",
2368 qp->queue_id,
2369 qp->entry_count,
2370 qp->entry_size,
2371 qp->host_index,
2372 qp->hba_index);
2373
2374 len += snprintf(pbuffer+len,
2375 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2376 if (len >= max_cnt)
2377 goto too_big;
2378 }
2379 }
2380out:
2283 spin_unlock_irq(&phba->hbalock); 2381 spin_unlock_irq(&phba->hbalock);
2284 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 2382 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2285 2383
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d30ca69..3d9438ce59ab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
45#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ 45#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ 46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
47#define LPFC_FCP_NEXT_RING 3 47#define LPFC_FCP_NEXT_RING 3
48#define LPFC_FCP_OAS_RING 3
48 49
49#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ 50#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
50#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ 51#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b116d328..fd79f7de7666 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {
2616#define cfg_phwq_SHIFT 15 2616#define cfg_phwq_SHIFT 15
2617#define cfg_phwq_MASK 0x00000001 2617#define cfg_phwq_MASK 0x00000001
2618#define cfg_phwq_WORD word12 2618#define cfg_phwq_WORD word12
2619#define cfg_oas_SHIFT 25
2620#define cfg_oas_MASK 0x00000001
2621#define cfg_oas_WORD word12
2619#define cfg_loopbk_scope_SHIFT 28 2622#define cfg_loopbk_scope_SHIFT 28
2620#define cfg_loopbk_scope_MASK 0x0000000f 2623#define cfg_loopbk_scope_MASK 0x0000000f
2621#define cfg_loopbk_scope_WORD word12 2624#define cfg_loopbk_scope_WORD word12
@@ -3322,6 +3325,9 @@ struct wqe_common {
3322#define wqe_ebde_cnt_SHIFT 0 3325#define wqe_ebde_cnt_SHIFT 0
3323#define wqe_ebde_cnt_MASK 0x0000000f 3326#define wqe_ebde_cnt_MASK 0x0000000f
3324#define wqe_ebde_cnt_WORD word10 3327#define wqe_ebde_cnt_WORD word10
3328#define wqe_oas_SHIFT 6
3329#define wqe_oas_MASK 0x00000001
3330#define wqe_oas_WORD word10
3325#define wqe_lenloc_SHIFT 7 3331#define wqe_lenloc_SHIFT 7
3326#define wqe_lenloc_MASK 0x00000003 3332#define wqe_lenloc_MASK 0x00000003
3327#define wqe_lenloc_WORD word10 3333#define wqe_lenloc_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index aa29ea099188..157ad1ceceae 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
80static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 80static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
81static void lpfc_sli4_disable_intr(struct lpfc_hba *); 81static void lpfc_sli4_disable_intr(struct lpfc_hba *);
82static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 82static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
83static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
83 84
84static struct scsi_transport_template *lpfc_transport_template = NULL; 85static struct scsi_transport_template *lpfc_transport_template = NULL;
85static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 86static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -4856,6 +4857,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4856 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4857 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4857 struct lpfc_mqe *mqe; 4858 struct lpfc_mqe *mqe;
4858 int longs; 4859 int longs;
4860 int fof_vectors = 0;
4859 4861
4860 /* Get all the module params for configuring this host */ 4862 /* Get all the module params for configuring this host */
4861 lpfc_get_cfgparam(phba); 4863 lpfc_get_cfgparam(phba);
@@ -5121,6 +5123,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5121 } 5123 }
5122 } 5124 }
5123 mempool_free(mboxq, phba->mbox_mem_pool); 5125 mempool_free(mboxq, phba->mbox_mem_pool);
5126
5127 /* Verify OAS is supported */
5128 lpfc_sli4_oas_verify(phba);
5129 if (phba->cfg_fof)
5130 fof_vectors = 1;
5131
5124 /* Verify all the SLI4 queues */ 5132 /* Verify all the SLI4 queues */
5125 rc = lpfc_sli4_queue_verify(phba); 5133 rc = lpfc_sli4_queue_verify(phba);
5126 if (rc) 5134 if (rc)
@@ -5162,7 +5170,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5162 5170
5163 phba->sli4_hba.fcp_eq_hdl = 5171 phba->sli4_hba.fcp_eq_hdl =
5164 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 5172 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5165 phba->cfg_fcp_io_channel), GFP_KERNEL); 5173 (fof_vectors + phba->cfg_fcp_io_channel)),
5174 GFP_KERNEL);
5166 if (!phba->sli4_hba.fcp_eq_hdl) { 5175 if (!phba->sli4_hba.fcp_eq_hdl) {
5167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5168 "2572 Failed allocate memory for " 5177 "2572 Failed allocate memory for "
@@ -5172,7 +5181,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5172 } 5181 }
5173 5182
5174 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5183 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5175 phba->cfg_fcp_io_channel), GFP_KERNEL); 5184 (fof_vectors +
5185 phba->cfg_fcp_io_channel)), GFP_KERNEL);
5176 if (!phba->sli4_hba.msix_entries) { 5186 if (!phba->sli4_hba.msix_entries) {
5177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5178 "2573 Failed allocate memory for msi-x " 5188 "2573 Failed allocate memory for msi-x "
@@ -5393,6 +5403,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5393 /* Initialize FCF connection rec list */ 5403 /* Initialize FCF connection rec list */
5394 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5404 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5395 5405
5406 /* Initialize OAS configuration list */
5407 spin_lock_init(&phba->devicelock);
5408 INIT_LIST_HEAD(&phba->luns);
5409
5396 return 0; 5410 return 0;
5397} 5411}
5398 5412
@@ -6819,6 +6833,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6819 int cfg_fcp_io_channel; 6833 int cfg_fcp_io_channel;
6820 uint32_t cpu; 6834 uint32_t cpu;
6821 uint32_t i = 0; 6835 uint32_t i = 0;
6836 int fof_vectors = phba->cfg_fof ? 1 : 0;
6822 6837
6823 /* 6838 /*
6824 * Sanity check for configured queue parameters against the run-time 6839 * Sanity check for configured queue parameters against the run-time
@@ -6845,7 +6860,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6845 cfg_fcp_io_channel = i; 6860 cfg_fcp_io_channel = i;
6846 } 6861 }
6847 6862
6848 if (cfg_fcp_io_channel > 6863 if (cfg_fcp_io_channel + fof_vectors >
6849 phba->sli4_hba.max_cfg_param.max_eq) { 6864 phba->sli4_hba.max_cfg_param.max_eq) {
6850 if (phba->sli4_hba.max_cfg_param.max_eq < 6865 if (phba->sli4_hba.max_cfg_param.max_eq <
6851 LPFC_FCP_IO_CHAN_MIN) { 6866 LPFC_FCP_IO_CHAN_MIN) {
@@ -6862,7 +6877,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6862 "available EQs: from %d to %d\n", 6877 "available EQs: from %d to %d\n",
6863 cfg_fcp_io_channel, 6878 cfg_fcp_io_channel,
6864 phba->sli4_hba.max_cfg_param.max_eq); 6879 phba->sli4_hba.max_cfg_param.max_eq);
6865 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; 6880 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
6881 fof_vectors;
6866 } 6882 }
6867 6883
6868 /* The actual number of FCP event queues adopted */ 6884 /* The actual number of FCP event queues adopted */
@@ -7073,6 +7089,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
7073 } 7089 }
7074 phba->sli4_hba.dat_rq = qdesc; 7090 phba->sli4_hba.dat_rq = qdesc;
7075 7091
7092 /* Create the Queues needed for Flash Optimized Fabric operations */
7093 if (phba->cfg_fof)
7094 lpfc_fof_queue_create(phba);
7076 return 0; 7095 return 0;
7077 7096
7078out_error: 7097out_error:
@@ -7097,6 +7116,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7097{ 7116{
7098 int idx; 7117 int idx;
7099 7118
7119 if (phba->cfg_fof)
7120 lpfc_fof_queue_destroy(phba);
7121
7100 if (phba->sli4_hba.hba_eq != NULL) { 7122 if (phba->sli4_hba.hba_eq != NULL) {
7101 /* Release HBA event queue */ 7123 /* Release HBA event queue */
7102 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7124 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7481,8 +7503,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7481 phba->sli4_hba.hdr_rq->queue_id, 7503 phba->sli4_hba.hdr_rq->queue_id,
7482 phba->sli4_hba.dat_rq->queue_id, 7504 phba->sli4_hba.dat_rq->queue_id,
7483 phba->sli4_hba.els_cq->queue_id); 7505 phba->sli4_hba.els_cq->queue_id);
7506
7507 if (phba->cfg_fof) {
7508 rc = lpfc_fof_queue_setup(phba);
7509 if (rc) {
7510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7511 "0549 Failed setup of FOF Queues: "
7512 "rc = 0x%x\n", rc);
7513 goto out_destroy_els_rq;
7514 }
7515 }
7484 return 0; 7516 return 0;
7485 7517
7518out_destroy_els_rq:
7519 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7486out_destroy_els_wq: 7520out_destroy_els_wq:
7487 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7521 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7488out_destroy_mbx_wq: 7522out_destroy_mbx_wq:
@@ -7521,6 +7555,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7521{ 7555{
7522 int fcp_qidx; 7556 int fcp_qidx;
7523 7557
7558 /* Unset the queues created for Flash Optimized Fabric operations */
7559 if (phba->cfg_fof)
7560 lpfc_fof_queue_destroy(phba);
7524 /* Unset mailbox command work queue */ 7561 /* Unset mailbox command work queue */
7525 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7562 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7526 /* Unset ELS work queue */ 7563 /* Unset ELS work queue */
@@ -8638,6 +8675,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8638 8675
8639 /* Configure MSI-X capability structure */ 8676 /* Configure MSI-X capability structure */
8640 vectors = phba->cfg_fcp_io_channel; 8677 vectors = phba->cfg_fcp_io_channel;
8678 if (phba->cfg_fof) {
8679 phba->sli4_hba.msix_entries[index].entry = index;
8680 vectors++;
8681 }
8641enable_msix_vectors: 8682enable_msix_vectors:
8642 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 8683 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8643 vectors); 8684 vectors);
@@ -8667,7 +8708,15 @@ enable_msix_vectors:
8667 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8708 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8668 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8709 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8669 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 8710 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8670 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8711 if (phba->cfg_fof && (index == (vectors - 1)))
8712 rc = request_irq(
8713 phba->sli4_hba.msix_entries[index].vector,
8714 &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
8715 (char *)&phba->sli4_hba.handler_name[index],
8716 &phba->sli4_hba.fcp_eq_hdl[index]);
8717 else
8718 rc = request_irq(
8719 phba->sli4_hba.msix_entries[index].vector,
8671 &lpfc_sli4_hba_intr_handler, IRQF_SHARED, 8720 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8672 (char *)&phba->sli4_hba.handler_name[index], 8721 (char *)&phba->sli4_hba.handler_name[index],
8673 &phba->sli4_hba.fcp_eq_hdl[index]); 8722 &phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8679,6 +8728,9 @@ enable_msix_vectors:
8679 } 8728 }
8680 } 8729 }
8681 8730
8731 if (phba->cfg_fof)
8732 vectors--;
8733
8682 if (vectors != phba->cfg_fcp_io_channel) { 8734 if (vectors != phba->cfg_fcp_io_channel) {
8683 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8684 "3238 Reducing IO channels to match number of " 8736 "3238 Reducing IO channels to match number of "
@@ -8724,7 +8776,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8724 free_irq(phba->sli4_hba.msix_entries[index].vector, 8776 free_irq(phba->sli4_hba.msix_entries[index].vector,
8725 &phba->sli4_hba.fcp_eq_hdl[index]); 8777 &phba->sli4_hba.fcp_eq_hdl[index]);
8726 } 8778 }
8727 8779 if (phba->cfg_fof) {
8780 free_irq(phba->sli4_hba.msix_entries[index].vector,
8781 &phba->sli4_hba.fcp_eq_hdl[index]);
8782 }
8728 /* Disable MSI-X */ 8783 /* Disable MSI-X */
8729 pci_disable_msix(phba->pcidev); 8784 pci_disable_msix(phba->pcidev);
8730 8785
@@ -8774,6 +8829,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8774 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8829 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8775 } 8830 }
8776 8831
8832 if (phba->cfg_fof) {
8833 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8834 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8835 }
8777 return 0; 8836 return 0;
8778} 8837}
8779 8838
@@ -8856,6 +8915,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8856 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 8915 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8857 fcp_eq_in_use, 1); 8916 fcp_eq_in_use, 1);
8858 } 8917 }
8918 if (phba->cfg_fof) {
8919 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8920 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8921 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8922 fcp_eq_in_use, 1);
8923 }
8859 } 8924 }
8860 } 8925 }
8861 return intr_mode; 8926 return intr_mode;
@@ -9166,6 +9231,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9166 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 9231 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9167 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 9232 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9168 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 9233 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9234 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9169 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 9235 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9170 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 9236 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9171 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 9237 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10799,6 +10865,169 @@ lpfc_io_resume(struct pci_dev *pdev)
10799 return; 10865 return;
10800} 10866}
10801 10867
10868/**
10869 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
10870 * @phba: pointer to lpfc hba data structure.
10871 *
10872 * This routine checks to see if OAS is supported for this adapter. If
10873 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
10874 * the enable oas flag is cleared and the pool created for OAS device data
10875 * is destroyed.
10876 *
10877 **/
10878void
10879lpfc_sli4_oas_verify(struct lpfc_hba *phba)
10880{
10881
10882 if (!phba->cfg_EnableXLane)
10883 return;
10884
10885 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
10886 phba->cfg_fof = 1;
10887 } else {
10888 phba->cfg_EnableXLane = 0;
10889 if (phba->device_data_mem_pool)
10890 mempool_destroy(phba->device_data_mem_pool);
10891 phba->device_data_mem_pool = NULL;
10892 }
10893
10894 return;
10895}
10896
10897/**
10898 * lpfc_fof_queue_setup - Set up all the fof queues
10899 * @phba: pointer to lpfc hba data structure.
10900 *
10901 * This routine is invoked to set up all the fof queues for the FC HBA
10902 * operation.
10903 *
10904 * Return codes
10905 * 0 - successful
10906 * -ENOMEM - No available memory
10907 **/
10908int
10909lpfc_fof_queue_setup(struct lpfc_hba *phba)
10910{
10911 struct lpfc_sli *psli = &phba->sli;
10912 int rc;
10913
10914 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
10915 if (rc)
10916 return -ENOMEM;
10917
10918 if (phba->cfg_EnableXLane) {
10919
10920 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
10921 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
10922 if (rc)
10923 goto out_oas_cq;
10924
10925 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
10926 phba->sli4_hba.oas_cq, LPFC_FCP);
10927 if (rc)
10928 goto out_oas_wq;
10929
10930 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
10931 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
10932 }
10933
10934 return 0;
10935
10936out_oas_wq:
10937 if (phba->cfg_EnableXLane)
10938 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10939out_oas_cq:
10940 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
10941 return rc;
10942
10943}
10944
10945/**
10946 * lpfc_fof_queue_create - Create all the fof queues
10947 * @phba: pointer to lpfc hba data structure.
10948 *
10949 * This routine is invoked to allocate all the fof queues for the FC HBA
10950 * operation. For each SLI4 queue type, the parameters such as queue entry
10951 * count (queue depth) shall be taken from the module parameter. For now,
10952 * we just use some constant number as place holder.
10953 *
10954 * Return codes
10955 * 0 - successful
10956 * -ENOMEM - No availble memory
10957 * -EIO - The mailbox failed to complete successfully.
10958 **/
10959int
10960lpfc_fof_queue_create(struct lpfc_hba *phba)
10961{
10962 struct lpfc_queue *qdesc;
10963
10964 /* Create FOF EQ */
10965 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
10966 phba->sli4_hba.eq_ecount);
10967 if (!qdesc)
10968 goto out_error;
10969
10970 phba->sli4_hba.fof_eq = qdesc;
10971
10972 if (phba->cfg_EnableXLane) {
10973
10974 /* Create OAS CQ */
10975 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
10976 phba->sli4_hba.cq_ecount);
10977 if (!qdesc)
10978 goto out_error;
10979
10980 phba->sli4_hba.oas_cq = qdesc;
10981
10982 /* Create OAS WQ */
10983 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
10984 phba->sli4_hba.wq_ecount);
10985 if (!qdesc)
10986 goto out_error;
10987
10988 phba->sli4_hba.oas_wq = qdesc;
10989
10990 }
10991 return 0;
10992
10993out_error:
10994 lpfc_fof_queue_destroy(phba);
10995 return -ENOMEM;
10996}
10997
10998/**
10999 * lpfc_fof_queue_destroy - Destroy all the fof queues
11000 * @phba: pointer to lpfc hba data structure.
11001 *
11002 * This routine is invoked to release all the SLI4 queues with the FC HBA
11003 * operation.
11004 *
11005 * Return codes
11006 * 0 - successful
11007 **/
11008int
11009lpfc_fof_queue_destroy(struct lpfc_hba *phba)
11010{
11011 /* Release FOF Event queue */
11012 if (phba->sli4_hba.fof_eq != NULL) {
11013 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
11014 phba->sli4_hba.fof_eq = NULL;
11015 }
11016
11017 /* Release OAS Completion queue */
11018 if (phba->sli4_hba.oas_cq != NULL) {
11019 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
11020 phba->sli4_hba.oas_cq = NULL;
11021 }
11022
11023 /* Release OAS Work queue */
11024 if (phba->sli4_hba.oas_wq != NULL) {
11025 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
11026 phba->sli4_hba.oas_wq = NULL;
11027 }
11028 return 0;
11029}
11030
10802static struct pci_device_id lpfc_id_table[] = { 11031static struct pci_device_id lpfc_id_table[] = {
10803 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 11032 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
10804 PCI_ANY_ID, PCI_ANY_ID, }, 11033 PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index b1db23c70781..ed419aad2b1f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -42,6 +42,7 @@
42 42
43#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ 43#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
44#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 44#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
45#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
45 46
46int 47int
47lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { 48lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
@@ -164,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
164 phba->lpfc_drb_pool = NULL; 165 phba->lpfc_drb_pool = NULL;
165 } 166 }
166 167
168 if (phba->cfg_EnableXLane) {
169 phba->device_data_mem_pool = mempool_create_kmalloc_pool(
170 LPFC_DEVICE_DATA_POOL_SIZE,
171 sizeof(struct lpfc_device_data));
172 if (!phba->device_data_mem_pool)
173 goto fail_free_hrb_pool;
174 } else {
175 phba->device_data_mem_pool = NULL;
176 }
177
167 return 0; 178 return 0;
168 fail_free_hrb_pool: 179 fail_free_hrb_pool:
169 pci_pool_destroy(phba->lpfc_hrb_pool); 180 pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -206,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
206{ 217{
207 int i; 218 int i;
208 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 219 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
220 struct lpfc_device_data *device_data;
209 221
210 /* Free HBQ pools */ 222 /* Free HBQ pools */
211 lpfc_sli_hbqbuf_free_all(phba); 223 lpfc_sli_hbqbuf_free_all(phba);
@@ -249,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
249 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 261 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
250 phba->lpfc_scsi_dma_buf_pool = NULL; 262 phba->lpfc_scsi_dma_buf_pool = NULL;
251 263
264 /* Free Device Data memory pool */
265 if (phba->device_data_mem_pool) {
266 /* Ensure all objects have been returned to the pool */
267 while (!list_empty(&phba->luns)) {
268 device_data = list_first_entry(&phba->luns,
269 struct lpfc_device_data,
270 listentry);
271 list_del(&device_data->listentry);
272 mempool_free(device_data, phba->device_data_mem_pool);
273 }
274 mempool_destroy(phba->device_data_mem_pool);
275 }
276 phba->device_data_mem_pool = NULL;
252 return; 277 return;
253} 278}
254 279
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0b08188598cd..4015fcc6d9a1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@ struct scsi_dif_tuple {
68 __be32 ref_tag; /* Target LBA or indirect LBA */ 68 __be32 ref_tag; /* Target LBA or indirect LBA */
69}; 69};
70 70
71static struct lpfc_rport_data *
72lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
73{
74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
75
76 if (vport->phba->cfg_EnableXLane)
77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
78 else
79 return (struct lpfc_rport_data *)sdev->hostdata;
80}
81
71static void 82static void
72lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 83lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
73static void 84static void
@@ -306,7 +317,7 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
306 old_queue_depth = sdev->queue_depth; 317 old_queue_depth = sdev->queue_depth;
307 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 318 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
308 new_queue_depth = sdev->queue_depth; 319 new_queue_depth = sdev->queue_depth;
309 rdata = sdev->hostdata; 320 rdata = lpfc_rport_data_from_scsi_device(sdev);
310 if (rdata) 321 if (rdata)
311 lpfc_send_sdev_queuedepth_change_event(phba, vport, 322 lpfc_send_sdev_queuedepth_change_event(phba, vport,
312 rdata->pnode, sdev->lun, 323 rdata->pnode, sdev->lun,
@@ -1502,7 +1513,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1502 } 1513 }
1503 1514
1504 /* Next check if we need to match the remote NPortID or WWPN */ 1515 /* Next check if we need to match the remote NPortID or WWPN */
1505 rdata = sc->device->hostdata; 1516 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1506 if (rdata && rdata->pnode) { 1517 if (rdata && rdata->pnode) {
1507 ndlp = rdata->pnode; 1518 ndlp = rdata->pnode;
1508 1519
@@ -3507,6 +3518,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3507 * we need to set word 4 of IOCB here 3518 * we need to set word 4 of IOCB here
3508 */ 3519 */
3509 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 3520 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3521
3522 /*
3523 * If the OAS driver feature is enabled and the lun is enabled for
3524 * OAS, set the oas iocb related flags.
3525 */
3526 if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
3527 scsi_cmnd->device->hostdata)->oas_enabled)
3528 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
3510 return 0; 3529 return 0;
3511} 3530}
3512 3531
@@ -4691,12 +4710,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4691{ 4710{
4692 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4711 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4693 struct lpfc_hba *phba = vport->phba; 4712 struct lpfc_hba *phba = vport->phba;
4694 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 4713 struct lpfc_rport_data *rdata;
4695 struct lpfc_nodelist *ndlp; 4714 struct lpfc_nodelist *ndlp;
4696 struct lpfc_scsi_buf *lpfc_cmd; 4715 struct lpfc_scsi_buf *lpfc_cmd;
4697 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 4716 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4698 int err; 4717 int err;
4699 4718
4719 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4700 err = fc_remote_port_chkready(rport); 4720 err = fc_remote_port_chkready(rport);
4701 if (err) { 4721 if (err) {
4702 cmnd->result = err; 4722 cmnd->result = err;
@@ -5179,10 +5199,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
5179static int 5199static int
5180lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 5200lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5181{ 5201{
5182 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 5202 struct lpfc_rport_data *rdata;
5183 struct lpfc_nodelist *pnode; 5203 struct lpfc_nodelist *pnode;
5184 unsigned long later; 5204 unsigned long later;
5185 5205
5206 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5186 if (!rdata) { 5207 if (!rdata) {
5187 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5208 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5188 "0797 Tgt Map rport failure: rdata x%p\n", rdata); 5209 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5200,7 +5221,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5200 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 5221 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5201 return SUCCESS; 5222 return SUCCESS;
5202 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 5223 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5203 rdata = cmnd->device->hostdata; 5224 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5204 if (!rdata) 5225 if (!rdata)
5205 return FAILED; 5226 return FAILED;
5206 pnode = rdata->pnode; 5227 pnode = rdata->pnode;
@@ -5272,13 +5293,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5272{ 5293{
5273 struct Scsi_Host *shost = cmnd->device->host; 5294 struct Scsi_Host *shost = cmnd->device->host;
5274 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5275 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 5296 struct lpfc_rport_data *rdata;
5276 struct lpfc_nodelist *pnode; 5297 struct lpfc_nodelist *pnode;
5277 unsigned tgt_id = cmnd->device->id; 5298 unsigned tgt_id = cmnd->device->id;
5278 unsigned int lun_id = cmnd->device->lun; 5299 unsigned int lun_id = cmnd->device->lun;
5279 struct lpfc_scsi_event_header scsi_event; 5300 struct lpfc_scsi_event_header scsi_event;
5280 int status; 5301 int status;
5281 5302
5303 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5282 if (!rdata) { 5304 if (!rdata) {
5283 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5305 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5284 "0798 Device Reset rport failure: rdata x%p\n", rdata); 5306 "0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5341,13 +5363,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5341{ 5363{
5342 struct Scsi_Host *shost = cmnd->device->host; 5364 struct Scsi_Host *shost = cmnd->device->host;
5343 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5365 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5344 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 5366 struct lpfc_rport_data *rdata;
5345 struct lpfc_nodelist *pnode; 5367 struct lpfc_nodelist *pnode;
5346 unsigned tgt_id = cmnd->device->id; 5368 unsigned tgt_id = cmnd->device->id;
5347 unsigned int lun_id = cmnd->device->lun; 5369 unsigned int lun_id = cmnd->device->lun;
5348 struct lpfc_scsi_event_header scsi_event; 5370 struct lpfc_scsi_event_header scsi_event;
5349 int status; 5371 int status;
5350 5372
5373 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5351 if (!rdata) { 5374 if (!rdata) {
5352 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5375 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5353 "0799 Target Reset rport failure: rdata x%p\n", rdata); 5376 "0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5547,11 +5570,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5547 uint32_t num_to_alloc = 0; 5570 uint32_t num_to_alloc = 0;
5548 int num_allocated = 0; 5571 int num_allocated = 0;
5549 uint32_t sdev_cnt; 5572 uint32_t sdev_cnt;
5573 struct lpfc_device_data *device_data;
5574 unsigned long flags;
5575 struct lpfc_name target_wwpn;
5550 5576
5551 if (!rport || fc_remote_port_chkready(rport)) 5577 if (!rport || fc_remote_port_chkready(rport))
5552 return -ENXIO; 5578 return -ENXIO;
5553 5579
5554 sdev->hostdata = rport->dd_data; 5580 if (phba->cfg_EnableXLane) {
5581
5582 /*
5583 * Check to see if the device data structure for the lun
5584 * exists. If not, create one.
5585 */
5586
5587 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5588 spin_lock_irqsave(&phba->devicelock, flags);
5589 device_data = __lpfc_get_device_data(phba,
5590 &phba->luns,
5591 &vport->fc_portname,
5592 &target_wwpn,
5593 sdev->lun);
5594 if (!device_data) {
5595 spin_unlock_irqrestore(&phba->devicelock, flags);
5596 device_data = lpfc_create_device_data(phba,
5597 &vport->fc_portname,
5598 &target_wwpn,
5599 sdev->lun, true);
5600 if (!device_data)
5601 return -ENOMEM;
5602 spin_lock_irqsave(&phba->devicelock, flags);
5603 list_add_tail(&device_data->listentry, &phba->luns);
5604 }
5605 device_data->rport_data = rport->dd_data;
5606 device_data->available = true;
5607 spin_unlock_irqrestore(&phba->devicelock, flags);
5608 sdev->hostdata = device_data;
5609 } else {
5610 sdev->hostdata = rport->dd_data;
5611 }
5555 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 5612 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5556 5613
5557 /* 5614 /*
@@ -5641,11 +5698,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)
5641{ 5698{
5642 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5699 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5643 struct lpfc_hba *phba = vport->phba; 5700 struct lpfc_hba *phba = vport->phba;
5701 unsigned long flags;
5702 struct lpfc_device_data *device_data = sdev->hostdata;
5703
5644 atomic_dec(&phba->sdev_cnt); 5704 atomic_dec(&phba->sdev_cnt);
5705 if ((phba->cfg_EnableXLane) && (device_data)) {
5706 spin_lock_irqsave(&phba->devicelock, flags);
5707 device_data->available = false;
5708 if (!device_data->oas_enabled)
5709 lpfc_delete_device_data(phba, device_data);
5710 spin_unlock_irqrestore(&phba->devicelock, flags);
5711 }
5645 sdev->hostdata = NULL; 5712 sdev->hostdata = NULL;
5646 return; 5713 return;
5647} 5714}
5648 5715
5716/**
5717 * lpfc_create_device_data - creates and initializes device data structure for OAS
 5718 * @phba: Pointer to host bus adapter structure.
5719 * @vport_wwpn: Pointer to vport's wwpn information
5720 * @target_wwpn: Pointer to target's wwpn information
5721 * @lun: Lun on target
5722 * @atomic_create: Flag to indicate if memory should be allocated using the
5723 * GFP_ATOMIC flag or not.
5724 *
5725 * This routine creates a device data structure which will contain identifying
5726 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 5727 * whether or not the corresponding lun is available to the system,
5728 * and pointer to the rport data.
5729 *
5730 * Return codes:
5731 * NULL - Error
5732 * Pointer to lpfc_device_data - Success
5733 **/
5734struct lpfc_device_data*
5735lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5736 struct lpfc_name *target_wwpn, uint64_t lun,
5737 bool atomic_create)
5738{
5739
5740 struct lpfc_device_data *lun_info;
5741 int memory_flags;
5742
5743 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5744 !(phba->cfg_EnableXLane))
5745 return NULL;
5746
5747 /* Attempt to create the device data to contain lun info */
5748
5749 if (atomic_create)
5750 memory_flags = GFP_ATOMIC;
5751 else
5752 memory_flags = GFP_KERNEL;
5753 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5754 if (!lun_info)
5755 return NULL;
5756 INIT_LIST_HEAD(&lun_info->listentry);
5757 lun_info->rport_data = NULL;
5758 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5759 sizeof(struct lpfc_name));
5760 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5761 sizeof(struct lpfc_name));
5762 lun_info->device_id.lun = lun;
5763 lun_info->oas_enabled = false;
5764 lun_info->available = false;
5765 return lun_info;
5766}
5767
5768/**
5769 * lpfc_delete_device_data - frees a device data structure for OAS
 5770 * @phba: Pointer to host bus adapter structure.
5771 * @lun_info: Pointer to device data structure to free.
5772 *
5773 * This routine frees the previously allocated device data structure passed.
5774 *
5775 **/
5776void
5777lpfc_delete_device_data(struct lpfc_hba *phba,
5778 struct lpfc_device_data *lun_info)
5779{
5780
5781 if (unlikely(!phba) || !lun_info ||
5782 !(phba->cfg_EnableXLane))
5783 return;
5784
5785 if (!list_empty(&lun_info->listentry))
5786 list_del(&lun_info->listentry);
5787 mempool_free(lun_info, phba->device_data_mem_pool);
5788 return;
5789}
5790
5791/**
5792 * __lpfc_get_device_data - returns the device data for the specified lun
 5793 * @phba: Pointer to host bus adapter structure.
 5794 * @list: Pointer to the list to search.
5795 * @vport_wwpn: Pointer to vport's wwpn information
5796 * @target_wwpn: Pointer to target's wwpn information
5797 * @lun: Lun on target
5798 *
5799 * This routine searches the list passed for the specified lun's device data.
5800 * This function does not hold locks, it is the responsibility of the caller
5801 * to ensure the proper lock is held before calling the function.
5802 *
5803 * Return codes:
5804 * NULL - Error
5805 * Pointer to lpfc_device_data - Success
5806 **/
5807struct lpfc_device_data*
5808__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5809 struct lpfc_name *vport_wwpn,
5810 struct lpfc_name *target_wwpn, uint64_t lun)
5811{
5812
5813 struct lpfc_device_data *lun_info;
5814
5815 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5816 !phba->cfg_EnableXLane)
5817 return NULL;
5818
5819 /* Check to see if the lun is already enabled for OAS. */
5820
5821 list_for_each_entry(lun_info, list, listentry) {
5822 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5823 sizeof(struct lpfc_name)) == 0) &&
5824 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5825 sizeof(struct lpfc_name)) == 0) &&
5826 (lun_info->device_id.lun == lun))
5827 return lun_info;
5828 }
5829
5830 return NULL;
5831}
5832
5833/**
5834 * lpfc_find_next_oas_lun - searches for the next oas lun
 5835 * @phba: Pointer to host bus adapter structure.
5836 * @vport_wwpn: Pointer to vport's wwpn information
5837 * @target_wwpn: Pointer to target's wwpn information
5838 * @starting_lun: Pointer to the lun to start searching for
5839 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5840 * @found_target_wwpn: Pointer to the found lun's target wwpn information
5841 * @found_lun: Pointer to the found lun.
5842 * @found_lun_status: Pointer to status of the found lun.
5843 *
5844 * This routine searches the luns list for the specified lun
5845 * or the first lun for the vport/target. If the vport wwpn contains
5846 * a zero value then a specific vport is not specified. In this case
5847 * any vport which contains the lun will be considered a match. If the
5848 * target wwpn contains a zero value then a specific target is not specified.
5849 * In this case any target which contains the lun will be considered a
5850 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
5851 * are returned. The function will also return the next lun if available.
5852 * If the next lun is not found, starting_lun parameter will be set to
5853 * NO_MORE_OAS_LUN.
5854 *
5855 * Return codes:
 5856 *   true - a matching OAS-enabled lun was found and the output parameters set
 5857 *   false - no matching lun was found
5858 **/
5859bool
5860lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5861 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5862 struct lpfc_name *found_vport_wwpn,
5863 struct lpfc_name *found_target_wwpn,
5864 uint64_t *found_lun,
5865 uint32_t *found_lun_status)
5866{
5867
5868 unsigned long flags;
5869 struct lpfc_device_data *lun_info;
5870 struct lpfc_device_id *device_id;
5871 uint64_t lun;
5872 bool found = false;
5873
5874 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5875 !starting_lun || !found_vport_wwpn ||
5876 !found_target_wwpn || !found_lun || !found_lun_status ||
5877 (*starting_lun == NO_MORE_OAS_LUN) ||
5878 !phba->cfg_EnableXLane)
5879 return false;
5880
5881 lun = *starting_lun;
5882 *found_lun = NO_MORE_OAS_LUN;
5883 *starting_lun = NO_MORE_OAS_LUN;
5884
 5885 /* Search for lun or the lun closest in value */
5886
5887 spin_lock_irqsave(&phba->devicelock, flags);
5888 list_for_each_entry(lun_info, &phba->luns, listentry) {
5889 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5890 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5891 sizeof(struct lpfc_name)) == 0)) &&
5892 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5893 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5894 sizeof(struct lpfc_name)) == 0)) &&
5895 (lun_info->oas_enabled)) {
5896 device_id = &lun_info->device_id;
5897 if ((!found) &&
5898 ((lun == FIND_FIRST_OAS_LUN) ||
5899 (device_id->lun == lun))) {
5900 *found_lun = device_id->lun;
5901 memcpy(found_vport_wwpn,
5902 &device_id->vport_wwpn,
5903 sizeof(struct lpfc_name));
5904 memcpy(found_target_wwpn,
5905 &device_id->target_wwpn,
5906 sizeof(struct lpfc_name));
5907 if (lun_info->available)
5908 *found_lun_status =
5909 OAS_LUN_STATUS_EXISTS;
5910 else
5911 *found_lun_status = 0;
5912 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5913 memset(vport_wwpn, 0x0,
5914 sizeof(struct lpfc_name));
5915 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5916 memset(target_wwpn, 0x0,
5917 sizeof(struct lpfc_name));
5918 found = true;
5919 } else if (found) {
5920 *starting_lun = device_id->lun;
5921 memcpy(vport_wwpn, &device_id->vport_wwpn,
5922 sizeof(struct lpfc_name));
5923 memcpy(target_wwpn, &device_id->target_wwpn,
5924 sizeof(struct lpfc_name));
5925 break;
5926 }
5927 }
5928 }
5929 spin_unlock_irqrestore(&phba->devicelock, flags);
5930 return found;
5931}
5932
5933/**
5934 * lpfc_enable_oas_lun - enables a lun for OAS operations
 5935 * @phba: Pointer to host bus adapter structure.
5936 * @vport_wwpn: Pointer to vport's wwpn information
5937 * @target_wwpn: Pointer to target's wwpn information
5938 * @lun: Lun
5939 *
 5940 * This routine enables a lun for oas operations. The routine does so by
5941 * doing the following :
5942 *
5943 * 1) Checks to see if the device data for the lun has been created.
5944 * 2) If found, sets the OAS enabled flag if not set and returns.
5945 * 3) Otherwise, creates a device data structure.
5946 * 4) If successfully created, indicates the device data is for an OAS lun,
5947 * indicates the lun is not available and add to the list of luns.
5948 *
5949 * Return codes:
5950 * false - Error
5951 * true - Success
5952 **/
5953bool
5954lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5955 struct lpfc_name *target_wwpn, uint64_t lun)
5956{
5957
5958 struct lpfc_device_data *lun_info;
5959 unsigned long flags;
5960
5961 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5962 !phba->cfg_EnableXLane)
5963 return false;
5964
5965 spin_lock_irqsave(&phba->devicelock, flags);
5966
5967 /* Check to see if the device data for the lun has been created */
5968 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5969 target_wwpn, lun);
5970 if (lun_info) {
5971 if (!lun_info->oas_enabled)
5972 lun_info->oas_enabled = true;
5973 spin_unlock_irqrestore(&phba->devicelock, flags);
5974 return true;
5975 }
5976
 5977 /* Create a lun info structure and add to list of luns. NOTE(review): devicelock is held with IRQs disabled here, yet atomic_create=false selects GFP_KERNEL, which may sleep -- confirm this should pass true. */
5978 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5979 false);
5980 if (lun_info) {
5981 lun_info->oas_enabled = true;
5982 lun_info->available = false;
5983 list_add_tail(&lun_info->listentry, &phba->luns);
5984 spin_unlock_irqrestore(&phba->devicelock, flags);
5985 return true;
5986 }
5987 spin_unlock_irqrestore(&phba->devicelock, flags);
5988 return false;
5989}
5990
5991/**
5992 * lpfc_disable_oas_lun - disables a lun for OAS operations
 5993 * @phba: Pointer to host bus adapter structure.
5994 * @vport_wwpn: Pointer to vport's wwpn information
5995 * @target_wwpn: Pointer to target's wwpn information
5996 * @lun: Lun
5997 *
 5998 * This routine disables a lun for oas operations. The routine does so by
5999 * doing the following :
6000 *
6001 * 1) Checks to see if the device data for the lun is created.
6002 * 2) If present, clears the flag indicating this lun is for OAS.
6003 * 3) If the lun is not available by the system, the device data is
6004 * freed.
6005 *
6006 * Return codes:
6007 * false - Error
6008 * true - Success
6009 **/
6010bool
6011lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6012 struct lpfc_name *target_wwpn, uint64_t lun)
6013{
6014
6015 struct lpfc_device_data *lun_info;
6016 unsigned long flags;
6017
6018 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6019 !phba->cfg_EnableXLane)
6020 return false;
6021
6022 spin_lock_irqsave(&phba->devicelock, flags);
6023
 6024 /* Check to see if device data for the lun exists. */
6025 lun_info = __lpfc_get_device_data(phba,
6026 &phba->luns, vport_wwpn,
6027 target_wwpn, lun);
6028 if (lun_info) {
6029 lun_info->oas_enabled = false;
6030 if (!lun_info->available)
6031 lpfc_delete_device_data(phba, lun_info);
6032 spin_unlock_irqrestore(&phba->devicelock, flags);
6033 return true;
6034 }
6035
6036 spin_unlock_irqrestore(&phba->devicelock, flags);
6037 return false;
6038}
5649 6039
5650struct scsi_host_template lpfc_template = { 6040struct scsi_host_template lpfc_template = {
5651 .module = THIS_MODULE, 6041 .module = THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7def493..0120bfccf50b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@ struct lpfc_rport_data {
41 struct lpfc_nodelist *pnode; /* Pointer to the node structure. */ 41 struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
42}; 42};
43 43
44struct lpfc_device_id {
45 struct lpfc_name vport_wwpn;
46 struct lpfc_name target_wwpn;
47 uint64_t lun;
48};
49
50struct lpfc_device_data {
51 struct list_head listentry;
52 struct lpfc_rport_data *rport_data;
53 struct lpfc_device_id device_id;
54 bool oas_enabled;
55 bool available;
56};
57
44struct fcp_rsp { 58struct fcp_rsp {
45 uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */ 59 uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
46 uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */ 60 uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@ struct lpfc_scsi_buf {
166#define LPFC_SCSI_DMA_EXT_SIZE 264 180#define LPFC_SCSI_DMA_EXT_SIZE 264
167#define LPFC_BPL_SIZE 1024 181#define LPFC_BPL_SIZE 1024
168#define MDAC_DIRECT_CMD 0x22 182#define MDAC_DIRECT_CMD 0x22
183
184#define FIND_FIRST_OAS_LUN 0
185#define NO_MORE_OAS_LUN -1
186#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c7181d85b993..38e56d91aef4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4976,12 +4976,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4976 LPFC_QUEUE_REARM); 4976 LPFC_QUEUE_REARM);
4977 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 4977 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4978 } 4978 }
4979
4980 if (phba->cfg_EnableXLane)
4981 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
4982
4979 if (phba->sli4_hba.hba_eq) { 4983 if (phba->sli4_hba.hba_eq) {
4980 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 4984 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4981 fcp_eqidx++) 4985 fcp_eqidx++)
4982 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], 4986 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4983 LPFC_QUEUE_REARM); 4987 LPFC_QUEUE_REARM);
4984 } 4988 }
4989
4990 if (phba->cfg_fof)
4991 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
4985} 4992}
4986 4993
4987/** 4994/**
@@ -8256,6 +8263,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8256 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 8263 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8257 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8264 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8258 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8265 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8266 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8267 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8268 if (phba->cfg_XLanePriority) {
8269 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8270 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8271 (phba->cfg_XLanePriority << 1));
8272 }
8273 }
8259 break; 8274 break;
8260 case CMD_FCP_IREAD64_CR: 8275 case CMD_FCP_IREAD64_CR:
8261 /* word3 iocb=iotag wqe=payload_offset_len */ 8276 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -8277,6 +8292,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8277 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8292 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8278 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8293 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8279 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8294 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8295 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8296 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8297 if (phba->cfg_XLanePriority) {
8298 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8299 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8300 (phba->cfg_XLanePriority << 1));
8301 }
8302 }
8280 break; 8303 break;
8281 case CMD_FCP_ICMND64_CR: 8304 case CMD_FCP_ICMND64_CR:
8282 /* word3 iocb=iotag wqe=payload_offset_len */ 8305 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -8297,6 +8320,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8297 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8320 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8298 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8321 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8299 iocbq->iocb.ulpFCP2Rcvy); 8322 iocbq->iocb.ulpFCP2Rcvy);
8323 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8324 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8325 if (phba->cfg_XLanePriority) {
8326 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8327 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8328 (phba->cfg_XLanePriority << 1));
8329 }
8330 }
8300 break; 8331 break;
8301 case CMD_GEN_REQUEST64_CR: 8332 case CMD_GEN_REQUEST64_CR:
8302 /* For this command calculate the xmit length of the 8333 /* For this command calculate the xmit length of the
@@ -8529,6 +8560,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8529{ 8560{
8530 struct lpfc_sglq *sglq; 8561 struct lpfc_sglq *sglq;
8531 union lpfc_wqe wqe; 8562 union lpfc_wqe wqe;
8563 struct lpfc_queue *wq;
8532 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8564 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8533 8565
8534 if (piocb->sli4_xritag == NO_XRI) { 8566 if (piocb->sli4_xritag == NO_XRI) {
@@ -8581,11 +8613,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8581 return IOCB_ERROR; 8613 return IOCB_ERROR;
8582 8614
8583 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8615 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8584 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8616 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8585 if (unlikely(!phba->sli4_hba.fcp_wq)) 8617 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
8586 return IOCB_ERROR; 8618 LPFC_IO_OAS))) {
8587 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8619 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8588 &wqe)) 8620 } else {
8621 wq = phba->sli4_hba.oas_wq;
8622 }
8623 if (lpfc_sli4_wq_put(wq, &wqe))
8589 return IOCB_ERROR; 8624 return IOCB_ERROR;
8590 } else { 8625 } else {
8591 if (unlikely(!phba->sli4_hba.els_wq)) 8626 if (unlikely(!phba->sli4_hba.els_wq))
@@ -8675,12 +8710,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8675 8710
8676 if (phba->sli_rev == LPFC_SLI_REV4) { 8711 if (phba->sli_rev == LPFC_SLI_REV4) {
8677 if (piocb->iocb_flag & LPFC_IO_FCP) { 8712 if (piocb->iocb_flag & LPFC_IO_FCP) {
8678 if (unlikely(!phba->sli4_hba.fcp_wq)) 8713 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
8679 return IOCB_ERROR; 8714 LPFC_IO_OAS))) {
8680 idx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8715 if (unlikely(!phba->sli4_hba.fcp_wq))
8681 piocb->fcp_wqidx = idx; 8716 return IOCB_ERROR;
8682 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; 8717 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8683 8718 piocb->fcp_wqidx = idx;
8719 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8720 } else {
8721 if (unlikely(!phba->sli4_hba.oas_wq))
8722 return IOCB_ERROR;
8723 idx = 0;
8724 piocb->fcp_wqidx = 0;
8725 ring_number = LPFC_FCP_OAS_RING;
8726 }
8684 pring = &phba->sli.ring[ring_number]; 8727 pring = &phba->sli.ring[ring_number];
8685 spin_lock_irqsave(&pring->ring_lock, iflags); 8728 spin_lock_irqsave(&pring->ring_lock, iflags);
8686 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8729 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12138,6 +12181,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
12138 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12181 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12139} 12182}
12140 12183
12184
12185/**
12186 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
12187 * entry
12188 * @phba: Pointer to HBA context object.
12189 * @eqe: Pointer to fast-path event queue entry.
12190 *
12191 * This routine process a event queue entry from the Flash Optimized Fabric
12192 * event queue. It will check the MajorCode and MinorCode to determine this
12193 * is for a completion event on a completion queue, if not, an error shall be
12194 * logged and just return. Otherwise, it will get to the corresponding
12195 * completion queue and process all the entries on the completion queue, rearm
12196 * the completion queue, and then return.
12197 **/
12198static void
12199lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
12200{
12201 struct lpfc_queue *cq;
12202 struct lpfc_cqe *cqe;
12203 bool workposted = false;
12204 uint16_t cqid;
12205 int ecount = 0;
12206
12207 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12209 "9147 Not a valid completion "
12210 "event: majorcode=x%x, minorcode=x%x\n",
12211 bf_get_le32(lpfc_eqe_major_code, eqe),
12212 bf_get_le32(lpfc_eqe_minor_code, eqe));
12213 return;
12214 }
12215
12216 /* Get the reference to the corresponding CQ */
12217 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12218
12219 /* Next check for OAS */
12220 cq = phba->sli4_hba.oas_cq;
12221 if (unlikely(!cq)) {
12222 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12224 "9148 OAS completion queue "
12225 "does not exist\n");
12226 return;
12227 }
12228
12229 if (unlikely(cqid != cq->queue_id)) {
12230 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12231 "9149 Miss-matched fast-path compl "
12232 "queue id: eqcqid=%d, fcpcqid=%d\n",
12233 cqid, cq->queue_id);
12234 return;
12235 }
12236
12237 /* Process all the entries to the OAS CQ */
12238 while ((cqe = lpfc_sli4_cq_get(cq))) {
12239 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12240 if (!(++ecount % cq->entry_repost))
12241 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12242 }
12243
12244 /* Track the max number of CQEs processed in 1 EQ */
12245 if (ecount > cq->CQ_max_cqe)
12246 cq->CQ_max_cqe = ecount;
12247
12248 /* Catch the no cq entry condition */
12249 if (unlikely(ecount == 0))
12250 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12251 "9153 No entry from fast-path completion "
12252 "queue fcpcqid=%d\n", cq->queue_id);
12253
 12254 /* In any case, flush and re-arm the CQ */
12255 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12256
12257 /* wake up worker thread if there are works to be done */
12258 if (workposted)
12259 lpfc_worker_wake_up(phba);
12260}
12261
12262/**
12263 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
12264 * @irq: Interrupt number.
12265 * @dev_id: The device context pointer.
12266 *
12267 * This function is directly called from the PCI layer as an interrupt
12268 * service routine when device with SLI-4 interface spec is enabled with
12269 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
12270 * IOCB ring event in the HBA. However, when the device is enabled with either
12271 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12272 * device-level interrupt handler. When the PCI slot is in error recovery
12273 * or the HBA is undergoing initialization, the interrupt handler will not
12274 * process the interrupt. The Flash Optimized Fabric ring event are handled in
12275 * the intrrupt context. This function is called without any lock held.
12276 * It gets the hbalock to access and update SLI data structures. Note that,
12277 * the EQ to CQ are one-to-one map such that the EQ index is
12278 * equal to that of CQ index.
12279 *
12280 * This function returns IRQ_HANDLED when interrupt is handled else it
12281 * returns IRQ_NONE.
12282 **/
12283irqreturn_t
12284lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
12285{
12286 struct lpfc_hba *phba;
12287 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12288 struct lpfc_queue *eq;
12289 struct lpfc_eqe *eqe;
12290 unsigned long iflag;
12291 int ecount = 0;
12292 uint32_t eqidx;
12293
12294 /* Get the driver's phba structure from the dev_id */
12295 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12296 phba = fcp_eq_hdl->phba;
12297 eqidx = fcp_eq_hdl->idx;
12298
12299 if (unlikely(!phba))
12300 return IRQ_NONE;
12301
12302 /* Get to the EQ struct associated with this vector */
12303 eq = phba->sli4_hba.fof_eq;
12304 if (unlikely(!eq))
12305 return IRQ_NONE;
12306
12307 /* Check device state for handling interrupt */
12308 if (unlikely(lpfc_intr_state_check(phba))) {
12309 eq->EQ_badstate++;
12310 /* Check again for link_state with lock held */
12311 spin_lock_irqsave(&phba->hbalock, iflag);
12312 if (phba->link_state < LPFC_LINK_DOWN)
12313 /* Flush, clear interrupt, and rearm the EQ */
12314 lpfc_sli4_eq_flush(phba, eq);
12315 spin_unlock_irqrestore(&phba->hbalock, iflag);
12316 return IRQ_NONE;
12317 }
12318
12319 /*
12320 * Process all the event on FCP fast-path EQ
12321 */
12322 while ((eqe = lpfc_sli4_eq_get(eq))) {
12323 lpfc_sli4_fof_handle_eqe(phba, eqe);
12324 if (!(++ecount % eq->entry_repost))
12325 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
12326 eq->EQ_processed++;
12327 }
12328
12329 /* Track the max number of EQEs processed in 1 intr */
12330 if (ecount > eq->EQ_max_eqe)
12331 eq->EQ_max_eqe = ecount;
12332
12333
12334 if (unlikely(ecount == 0)) {
12335 eq->EQ_no_entry++;
12336
12337 if (phba->intr_type == MSIX)
12338 /* MSI-X treated interrupt served as no EQ share INT */
12339 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12340 "9145 MSI-X interrupt with no EQE\n");
12341 else {
12342 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12343 "9146 ISR interrupt with no EQE\n");
12344 /* Non MSI-X treated on interrupt as EQ share INT */
12345 return IRQ_NONE;
12346 }
12347 }
12348 /* Always clear and re-arm the fast-path EQ */
12349 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12350 return IRQ_HANDLED;
12351}
12352
12141/** 12353/**
12142 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 12354 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
12143 * @irq: Interrupt number. 12355 * @irq: Interrupt number.
@@ -12293,6 +12505,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
12293 hba_handled |= true; 12505 hba_handled |= true;
12294 } 12506 }
12295 12507
12508 if (phba->cfg_fof) {
12509 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
12510 &phba->sli4_hba.fcp_eq_hdl[0]);
12511 if (hba_irq_rc == IRQ_HANDLED)
12512 hba_handled |= true;
12513 }
12514
12296 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 12515 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12297} /* lpfc_sli4_intr_handler */ 12516} /* lpfc_sli4_intr_handler */
12298 12517
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f2478706e..6f04080f4ea8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@ struct lpfc_iocbq {
78#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 78#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
79#define LPFC_FIP_ELS_ID_SHIFT 14 79#define LPFC_FIP_ELS_ID_SHIFT 14
80 80
81#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
82
81 uint32_t drvrTimeout; /* driver timeout in seconds */ 83 uint32_t drvrTimeout; /* driver timeout in seconds */
82 uint32_t fcp_wqidx; /* index to FCP work queue */ 84 uint32_t fcp_wqidx; /* index to FCP work queue */
83 struct lpfc_vport *vport;/* virtual port pointer */ 85 struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd1a89d..e43259075e92 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
39#define LPFC_FCP_IO_CHAN_MIN 1 39#define LPFC_FCP_IO_CHAN_MIN 1
40#define LPFC_FCP_IO_CHAN_MAX 16 40#define LPFC_FCP_IO_CHAN_MAX 16
41 41
42/* Number of channels used for Flash Optimized Fabric (FOF) operations */
43
44#define LPFC_FOF_IO_CHAN_NUM 1
45
42/* 46/*
43 * Provide the default FCF Record attributes used by the driver 47 * Provide the default FCF Record attributes used by the driver
44 * when nonFIP mode is configured and there is no other default 48 * when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {
399 uint32_t if_page_sz; 403 uint32_t if_page_sz;
400 uint32_t rq_db_window; 404 uint32_t rq_db_window;
401 uint32_t loopbk_scope; 405 uint32_t loopbk_scope;
406 uint32_t oas_supported;
402 uint32_t eq_pages_max; 407 uint32_t eq_pages_max;
403 uint32_t eqe_size; 408 uint32_t eqe_size;
404 uint32_t cq_pages_max; 409 uint32_t cq_pages_max;
@@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {
439 uint8_t lnk_no; 444 uint8_t lnk_no;
440}; 445};
441 446
447#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
448 LPFC_FOF_IO_CHAN_NUM)
442#define LPFC_SLI4_HANDLER_NAME_SZ 16 449#define LPFC_SLI4_HANDLER_NAME_SZ 16
443 450
444/* Used for IRQ vector to CPU mapping */ 451/* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@ struct lpfc_sli4_hba {
507 struct lpfc_register sli_intf; 514 struct lpfc_register sli_intf;
508 struct lpfc_pc_sli4_params pc_sli4_params; 515 struct lpfc_pc_sli4_params pc_sli4_params;
509 struct msix_entry *msix_entries; 516 struct msix_entry *msix_entries;
510 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; 517 uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
511 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 518 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
512 519
513 /* Pointers to the constructed SLI4 queues */ 520 /* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@ struct lpfc_sli4_hba {
527 uint32_t ulp0_mode; /* ULP0 protocol mode */ 534 uint32_t ulp0_mode; /* ULP0 protocol mode */
528 uint32_t ulp1_mode; /* ULP1 protocol mode */ 535 uint32_t ulp1_mode; /* ULP1 protocol mode */
529 536
537 struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
538
539 /* Optimized Access Storage specific queues/structures */
540
541 struct lpfc_queue *oas_cq; /* OAS completion queue */
542 struct lpfc_queue *oas_wq; /* OAS Work queue */
543 struct lpfc_sli_ring *oas_ring;
544 uint64_t oas_next_lun;
545 uint8_t oas_next_tgt_wwpn[8];
546 uint8_t oas_next_vpt_wwpn[8];
547
530 /* Setup information for various queue parameters */ 548 /* Setup information for various queue parameters */
531 int eq_esize; 549 int eq_esize;
532 int eq_ecount; 550 int eq_ecount;