author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-31 16:31:23 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-31 16:31:23 -0400
commit    a75ee6ecd411a50bf4da927c2fdb2cb56246a2bd (patch)
tree      fcb06e1940152b115901fda68e7eea1cc1196ff3 /drivers/scsi/fcoe
parent    c9651e70ad0aa499814817cbf3cc1d0b806ed3a1 (diff)
parent    699316948628dab9e813c415640fe5b9f65cd5e3 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
Pull SCSI updates from James Bottomley:
 "This is primarily another round of driver updates (lpfc, bfa, fcoe,
  ipr) plus a new ufshcd driver. There shouldn't be anything
  controversial in here (the final deletion of scsi proc_ops which
  caused some build breakage has been held over until the next merge
  window to give us more time to stabilise it).

  I'm afraid, with me moving continents at exactly the wrong time,
  anything submitted after the merge window opened has been held over
  to the next merge window."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (63 commits)
  [SCSI] ipr: Driver version 2.5.3
  [SCSI] ipr: Increase alignment boundary of command blocks
  [SCSI] ipr: Increase max concurrent oustanding commands
  [SCSI] ipr: Remove unnecessary memory barriers
  [SCSI] ipr: Remove unnecessary interrupt clearing on new adapters
  [SCSI] ipr: Fix target id allocation re-use problem
  [SCSI] atp870u, mpt2sas, qla4xxx use pci_dev->revision
  [SCSI] fcoe: Drop the rtnl_mutex before calling fcoe_ctlr_link_up
  [SCSI] bfa: Update the driver version to 3.0.23.0
  [SCSI] bfa: BSG and User interface fixes.
  [SCSI] bfa: Fix to avoid vport delete hang on request queue full scenario.
  [SCSI] bfa: Move service parameter programming logic into firmware.
  [SCSI] bfa: Revised Fabric Assigned Address(FAA) feature implementation.
  [SCSI] bfa: Flash controller IOC pll init fixes.
  [SCSI] bfa: Serialize the IOC hw semaphore unlock logic.
  [SCSI] bfa: Modify ISR to process pending completions
  [SCSI] bfa: Add fc host issue lip support
  [SCSI] mpt2sas: remove extraneous sas_log_info messages
  [SCSI] libfc: fcoe_transport_create fails in single-CPU environment
  [SCSI] fcoe: reduce contention for fcoe_rx_list lock [v2]
  ...
Diffstat (limited to 'drivers/scsi/fcoe')
-rw-r--r--  drivers/scsi/fcoe/fcoe.c      | 83
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 38
2 files changed, 63 insertions(+), 58 deletions(-)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ae7d15c44e2a..335e85192807 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 		goto err;
 
 	fps = &per_cpu(fcoe_percpu, cpu);
-	spin_lock_bh(&fps->fcoe_rx_list.lock);
+	spin_lock(&fps->fcoe_rx_list.lock);
 	if (unlikely(!fps->thread)) {
 		/*
 		 * The targeted CPU is not ready, let's target
@@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 				"ready for incoming skb- using first online "
 				"CPU.\n");
 
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
+		spin_unlock(&fps->fcoe_rx_list.lock);
 		cpu = cpumask_first(cpu_online_mask);
 		fps = &per_cpu(fcoe_percpu, cpu);
-		spin_lock_bh(&fps->fcoe_rx_list.lock);
+		spin_lock(&fps->fcoe_rx_list.lock);
 		if (!fps->thread) {
-			spin_unlock_bh(&fps->fcoe_rx_list.lock);
+			spin_unlock(&fps->fcoe_rx_list.lock);
 			goto err;
 		}
 	}
@@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 	 * so we're free to queue skbs into it's queue.
 	 */
 
-	/* If this is a SCSI-FCP frame, and this is already executing on the
-	 * correct CPU, and the queue for this CPU is empty, then go ahead
-	 * and process the frame directly in the softirq context.
-	 * This lets us process completions without context switching from the
-	 * NET_RX softirq, to our receive processing thread, and then back to
-	 * BLOCK softirq context.
+	/*
+	 * Note: We used to have a set of conditions under which we would
+	 * call fcoe_recv_frame directly, rather than queuing to the rx list
+	 * as it could save a few cycles, but doing so is prohibited, as
+	 * fcoe_recv_frame has several paths that may sleep, which is forbidden
+	 * in softirq context.
 	 */
-	if (fh->fh_type == FC_TYPE_FCP &&
-	    cpu == smp_processor_id() &&
-	    skb_queue_empty(&fps->fcoe_rx_list)) {
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-		fcoe_recv_frame(skb);
-	} else {
-		__skb_queue_tail(&fps->fcoe_rx_list, skb);
-		if (fps->fcoe_rx_list.qlen == 1)
-			wake_up_process(fps->thread);
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-	}
+	__skb_queue_tail(&fps->fcoe_rx_list, skb);
+	if (fps->thread->state == TASK_INTERRUPTIBLE)
+		wake_up_process(fps->thread);
+	spin_unlock(&fps->fcoe_rx_list.lock);
 
 	return 0;
 err:
@@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
 {
 	struct fcoe_percpu_s *p = arg;
 	struct sk_buff *skb;
+	struct sk_buff_head tmp;
+
+	skb_queue_head_init(&tmp);
 
 	set_user_nice(current, -20);
 
 	while (!kthread_should_stop()) {
 
 		spin_lock_bh(&p->fcoe_rx_list.lock);
-		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+		while ((skb = __skb_dequeue(&tmp)) != NULL)
+			fcoe_recv_frame(skb);
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		if (!skb_queue_len(&p->fcoe_rx_list)) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_bh(&p->fcoe_rx_list.lock);
 			schedule();
 			set_current_state(TASK_RUNNING);
-			if (kthread_should_stop())
-				return 0;
-			spin_lock_bh(&p->fcoe_rx_list.lock);
-		}
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-		fcoe_recv_frame(skb);
+		} else
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
 	}
 	return 0;
 }
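
Taken together, the fcoe_rcv() and fcoe_percpu_receive_thread() hunks above implement the "[SCSI] fcoe: reduce contention for fcoe_rx_list lock [v2]" change from the series: the softirq path now only queues the skb (waking the per-CPU thread when it is idle), and the thread drains the whole pending list in one go via skb_queue_splice_init(), so the list lock is taken once per batch instead of once per frame. As a rough illustration of that batch-drain pattern only, here is a self-contained userspace sketch; the queue type, pthread plumbing and names are stand-ins of my own, not the kernel's sk_buff_head API.

/*
 * Minimal userspace sketch of the batch-drain pattern used above: the
 * consumer takes the lock once, detaches the whole pending list, and
 * processes it with the lock dropped.  Illustration only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int value;
	struct item *next;
};

static struct item *pending;			/* shared rx list (LIFO for brevity) */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int done;

static void enqueue(int value)
{
	struct item *it = malloc(sizeof(*it));

	it->value = value;
	pthread_mutex_lock(&lock);
	it->next = pending;
	pending = it;
	pthread_cond_signal(&wake);		/* like waking an idle thread */
	pthread_mutex_unlock(&lock);
}

static void *consumer(void *arg)
{
	(void)arg;
	for (;;) {
		struct item *batch;

		pthread_mutex_lock(&lock);
		while (!pending && !done)
			pthread_cond_wait(&wake, &lock);
		batch = pending;		/* splice: detach everything at once */
		pending = NULL;
		if (!batch && done) {
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		pthread_mutex_unlock(&lock);

		while (batch) {			/* drain off-lock, like fcoe_recv_frame() */
			struct item *it = batch;

			batch = it->next;
			printf("processed %d\n", it->value);
			free(it);
		}
	}
}

int main(void)
{
	pthread_t tid;
	int i;

	pthread_create(&tid, NULL, consumer, NULL);
	for (i = 0; i < 16; i++)
		enqueue(i);

	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}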
@@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 	/* start FIP Discovery and FLOGI */
 	lport->boot_time = jiffies;
 	fc_fabric_login(lport);
-	if (!fcoe_link_ok(lport))
+	if (!fcoe_link_ok(lport)) {
+		rtnl_unlock();
 		fcoe_ctlr_link_up(&fcoe->ctlr);
+		mutex_unlock(&fcoe_config_mutex);
+		return rc;
+	}
 
 out_nodev:
 	rtnl_unlock();
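
The fcoe_create() hunk matches the "[SCSI] fcoe: Drop the rtnl_mutex before calling fcoe_ctlr_link_up" commit in the series: rtnl is released before calling into the link-up path, presumably so that locks taken inside fcoe_ctlr_link_up() are never acquired while rtnl is held. The sketch below shows the general lock-ordering rule being applied; lock_a/lock_b and the helper names are hypothetical, not the actual fcoe/rtnl lock graph.

/*
 * Illustration only: dropping an outer lock before calling into a path
 * that takes its own mutex avoids AB-BA deadlocks.  The hazard in the
 * "bad" shape only bites when some other thread takes B and then A.
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* hypothetical outer lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* hypothetical inner lock */

static void link_up(void)
{
	pthread_mutex_lock(&lock_b);	/* callee takes B internally */
	/* ... notify upper layers ... */
	pthread_mutex_unlock(&lock_b);
}

/* Deadlock-prone shape: A is held while the callee takes B. */
static void create_bad(void)
{
	pthread_mutex_lock(&lock_a);
	link_up();			/* establishes A -> B ordering */
	pthread_mutex_unlock(&lock_a);
}

/* Shape used by the hunk above: finish the work that needs A, drop A,
 * then call into the B-taking path with no lock held. */
static void create_good(void)
{
	pthread_mutex_lock(&lock_a);
	/* ... setup that genuinely needs A ... */
	pthread_mutex_unlock(&lock_a);
	link_up();
}

int main(void)
{
	create_bad();	/* harmless single-threaded; unsafe with a concurrent B->A path */
	create_good();
	return 0;
}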
@@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport)
 static void fcoe_percpu_clean(struct fc_lport *lport)
 {
 	struct fcoe_percpu_s *pp;
-	struct fcoe_rcv_info *fr;
-	struct sk_buff_head *list;
-	struct sk_buff *skb, *next;
-	struct sk_buff *head;
+	struct sk_buff *skb;
 	unsigned int cpu;
 
 	for_each_possible_cpu(cpu) {
 		pp = &per_cpu(fcoe_percpu, cpu);
-		spin_lock_bh(&pp->fcoe_rx_list.lock);
-		list = &pp->fcoe_rx_list;
-		head = list->next;
-		for (skb = head; skb != (struct sk_buff *)list;
-			skb = next) {
-			next = skb->next;
-			fr = fcoe_dev_from_skb(skb);
-			if (fr->fr_dev == lport) {
-				__skb_unlink(skb, list);
-				kfree_skb(skb);
-			}
-		}
 
-		if (!pp->thread || !cpu_online(cpu)) {
-			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		if (!pp->thread || !cpu_online(cpu))
 			continue;
-		}
 
 		skb = dev_alloc_skb(0);
 		if (!skb) {
@@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
 		}
 		skb->destructor = fcoe_percpu_flush_done;
 
+		spin_lock_bh(&pp->fcoe_rx_list.lock);
 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
 		if (pp->fcoe_rx_list.qlen == 1)
 			wake_up_process(pp->thread);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index e7522dcc296e..249a106888d9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
 		printk(KERN_INFO "libfcoe: host%d: FIP selected "
 		       "Fibre-Channel Forwarder MAC %pM\n",
 		       fip->lp->host->host_no, sel->fcf_mac);
-		memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+		memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
 		fip->map_dest = 0;
 	}
 unlock:
@@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
 			memcpy(fcf->fcf_mac,
 			       ((struct fip_mac_desc *)desc)->fd_mac,
 			       ETH_ALEN);
+			memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
 			if (!is_valid_ether_addr(fcf->fcf_mac)) {
 				LIBFCOE_FIP_DBG(fip,
 					"Invalid MAC addr %pM in FIP adv\n",
@@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	struct fip_desc *desc;
 	struct fip_encaps *els;
 	struct fcoe_dev_stats *stats;
+	struct fcoe_fcf *sel;
 	enum fip_desc_type els_dtype = 0;
 	u8 els_op;
 	u8 sub;
@@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 			goto drop;
 		/* Drop ELS if there are duplicate critical descriptors */
 		if (desc->fip_dtype < 32) {
-			if (desc_mask & 1U << desc->fip_dtype) {
+			if ((desc->fip_dtype != FIP_DT_MAC) &&
+			    (desc_mask & 1U << desc->fip_dtype)) {
 				LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
 						"Descriptors in FIP ELS\n");
 				goto drop;
@@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		}
 		switch (desc->fip_dtype) {
 		case FIP_DT_MAC:
+			sel = fip->sel_fcf;
 			if (desc_cnt == 1) {
 				LIBFCOE_FIP_DBG(fip, "FIP descriptors "
 						"received out of order\n");
 				goto drop;
 			}
+			/*
+			 * Some switch implementations send two MAC descriptors,
+			 * with first MAC(granted_mac) being the FPMA, and the
+			 * second one(fcoe_mac) is used as destination address
+			 * for sending/receiving FCoE packets. FIP traffic is
+			 * sent using fip_mac. For regular switches, both
+			 * fip_mac and fcoe_mac would be the same.
+			 */
+			if (desc_cnt == 2)
+				memcpy(granted_mac,
+				       ((struct fip_mac_desc *)desc)->fd_mac,
+				       ETH_ALEN);
 
 			if (dlen != sizeof(struct fip_mac_desc))
 				goto len_err;
-			memcpy(granted_mac,
-			       ((struct fip_mac_desc *)desc)->fd_mac,
-			       ETH_ALEN);
+
+			if ((desc_cnt == 3) && (sel))
+				memcpy(sel->fcoe_mac,
+				       ((struct fip_mac_desc *)desc)->fd_mac,
+				       ETH_ALEN);
 			break;
 		case FIP_DT_FLOGI:
 		case FIP_DT_FDISC:
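
The FIP_DT_MAC changes above let fcoe_ctlr_recv_els() accept a second MAC descriptor (the duplicate-descriptor check is relaxed for FIP_DT_MAC) and record it as the FCF's fcoe_mac, while the first one remains the granted FPMA. As a standalone illustration of walking a FIP descriptor list and separating the two MACs, here is a hedged sketch; the struct layouts and the descriptor buffer are simplified stand-ins for the kernel's fc_fip.h definitions and on-wire frames, and the positional desc_cnt bookkeeping of the real code is reduced to simply counting MAC descriptors.

/*
 * Standalone sketch: walk a FIP descriptor list and separate a granted
 * (FPMA) MAC from an optional second "FCoE MAC".  Illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define FIP_DT_MAC	2	/* MAC address descriptor type (FC-BB-5) */

struct fip_desc {		/* simplified common descriptor header */
	uint8_t fip_dtype;
	uint8_t fip_dlen;	/* length in 4-byte words, header included */
};

struct fip_mac_desc {		/* simplified MAC descriptor */
	struct fip_desc fd_desc;
	uint8_t fd_mac[ETH_ALEN];
};

/* Returns how many MAC descriptors were found; copies the first into
 * granted_mac and, when present, the second into fcoe_mac. */
static int parse_macs(const uint8_t *buf, size_t len,
		      uint8_t granted_mac[ETH_ALEN], uint8_t fcoe_mac[ETH_ALEN])
{
	int macs = 0;

	while (len >= sizeof(struct fip_desc)) {
		const struct fip_desc *desc = (const struct fip_desc *)buf;
		size_t dlen = desc->fip_dlen * 4u;

		if (dlen < sizeof(struct fip_desc) || dlen > len)
			break;	/* malformed descriptor, stop */

		if (desc->fip_dtype == FIP_DT_MAC &&
		    dlen >= sizeof(struct fip_mac_desc)) {
			const struct fip_mac_desc *mac =
				(const struct fip_mac_desc *)desc;

			if (macs == 0)
				memcpy(granted_mac, mac->fd_mac, ETH_ALEN);
			else if (macs == 1)
				memcpy(fcoe_mac, mac->fd_mac, ETH_ALEN);
			macs++;
		}
		buf += dlen;
		len -= dlen;
	}
	return macs;
}

int main(void)
{
	/* Two fabricated MAC descriptors: dtype=2, dlen=2 words (8 bytes each). */
	uint8_t buf[] = {
		FIP_DT_MAC, 2, 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03,
		FIP_DT_MAC, 2, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
	};
	uint8_t granted[ETH_ALEN] = {0}, fcoe[ETH_ALEN] = {0};
	int n = parse_macs(buf, sizeof(buf), granted, fcoe);

	printf("found %d MAC descriptor(s): granted %02x..%02x, fcoe %02x..%02x\n",
	       n, granted[0], granted[5], fcoe[0], fcoe[5]);
	return 0;
}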
@@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 		 * No Vx_Port description. Clear all NPIV ports,
 		 * followed by physical port
 		 */
-		mutex_lock(&lport->lp_mutex);
-		list_for_each_entry(vn_port, &lport->vports, list)
-			fc_lport_reset(vn_port);
-		mutex_unlock(&lport->lp_mutex);
-
 		mutex_lock(&fip->ctlr_mutex);
 		per_cpu_ptr(lport->dev_stats,
 			    get_cpu())->VLinkFailureCount++;
@@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 		fcoe_ctlr_reset(fip);
 		mutex_unlock(&fip->ctlr_mutex);
 
+		mutex_lock(&lport->lp_mutex);
+		list_for_each_entry(vn_port, &lport->vports, list)
+			fc_lport_reset(vn_port);
+		mutex_unlock(&lport->lp_mutex);
+
 		fc_lport_reset(fip->lp);
 		fcoe_ctlr_solicit(fip, NULL);
 	} else {