author     Linus Torvalds <torvalds@linux-foundation.org>	2011-07-30 14:36:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2011-07-30 14:36:02 -0400
commit     6c6e3b828b2a13b923b9465fc4316c5bdc92291f
tree       ca027f7d7645c577ed76fcc8358163eb1689d8ae /drivers/scsi/fcoe
parent     c11abbbaa3252875c5740a6880b9a1a6f1e2a870
parent     d272281c390eb6c3f1e70ed0337c9e619d99cd9c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (71 commits)
[SCSI] fcoe: cleanup cpu selection for incoming requests
[SCSI] fcoe: add fip retry to avoid missing critical keep alive
[SCSI] libfc: fix warn on in lport retry
[SCSI] libfc: Remove the reference to FCP packet from scsi_cmnd in case of error
[SCSI] libfc: cleanup sending SRR request
[SCSI] libfc: two minor changes in comments
[SCSI] libfc, fcoe: ignore rx frame with wrong xid info
[SCSI] libfc: release exchg cache
[SCSI] libfc: use FC_MAX_ERROR_CNT
[SCSI] fcoe: remove unused ptype field in fcoe_rcv_info
[SCSI] bnx2fc: Update copyright and bump version to 1.0.4
[SCSI] bnx2fc: Tx BDs cache in write tasks
[SCSI] bnx2fc: Do not arm CQ when there are no CQEs
[SCSI] bnx2fc: hold tgt lock when calling cmd_release
[SCSI] bnx2fc: Enable support for sequence level error recovery
[SCSI] bnx2fc: HSI changes for tape
[SCSI] bnx2fc: Handle REC_TOV error code from firmware
[SCSI] bnx2fc: REC/SRR link service request and response handling
[SCSI] bnx2fc: Support 'sequence cleanup' task
[SCSI] dh_rdac: Associate HBA and storage in rdac_controller to support partitions in storage
...
Diffstat (limited to 'drivers/scsi/fcoe')
-rw-r--r--	drivers/scsi/fcoe/fcoe.c | 69
1 files changed, 32 insertions, 37 deletions
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 204fa8d4b4ab..ba710e350ac5 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -487,6 +487,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
 }
 
 /**
+ * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
+ * @port: The FCoE port
+ * @skb: The FIP/FCoE packet to be sent
+ */
+static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
+{
+	if (port->fcoe_pending_queue.qlen)
+		fcoe_check_wait_queue(port->lport, skb);
+	else if (fcoe_start_io(skb))
+		fcoe_check_wait_queue(port->lport, skb);
+}
+
+/**
  * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
  * @fip: The FCoE controller
  * @skb: The FIP packet to be sent
@@ -494,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	skb->dev = fcoe_from_ctlr(fip)->netdev;
-	dev_queue_xmit(skb);
+	fcoe_port_send(lport_priv(fip->lp), skb);
 }
 
 /**
@@ -1257,30 +1270,20 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
 /**
  * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
  *			command.
- * @curr_cpu: CPU which received request
  *
- * This routine selects next CPU based on cpumask.
+ * This routine selects next CPU based on cpumask to distribute
+ * incoming requests in round robin.
  *
- * Returns: int (CPU number). Caller to verify if returned CPU is online or not.
+ * Returns: int CPU number
  */
-static unsigned int fcoe_select_cpu(unsigned int curr_cpu)
+static inline unsigned int fcoe_select_cpu(void)
 {
 	static unsigned int selected_cpu;
 
-	if (num_online_cpus() == 1)
-		return curr_cpu;
-	/*
-	 * Doing following check, to skip "curr_cpu (smp_processor_id)"
-	 * from selection of CPU is intentional. This is to avoid same CPU
-	 * doing post-processing of command. "curr_cpu" to just receive
-	 * incoming request in case where rx_id is UNKNOWN and all other
-	 * CPU to actually process the command(s)
-	 */
-	do {
-		selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
-		if (selected_cpu >= nr_cpu_ids)
-			selected_cpu = cpumask_first(cpu_online_mask);
-	} while (selected_cpu == curr_cpu);
+	selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
+	if (selected_cpu >= nr_cpu_ids)
+		selected_cpu = cpumask_first(cpu_online_mask);
+
 	return selected_cpu;
 }
 
@@ -1350,30 +1353,26 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lport;
-	fr->ptype = ptype;
 
 	/*
 	 * In case the incoming frame's exchange is originated from
 	 * the initiator, then received frame's exchange id is ANDed
 	 * with fc_cpu_mask bits to get the same cpu on which exchange
-	 * was originated, otherwise just use the current cpu.
+	 * was originated, otherwise select cpu using rx exchange id
+	 * or fcoe_select_cpu().
 	 */
 	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
 		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
 	else {
-		cpu = smp_processor_id();
-
-		if ((fh->fh_type == FC_TYPE_FCP) &&
-		    (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
-			do {
-				cpu = fcoe_select_cpu(cpu);
-			} while (!cpu_online(cpu));
-		} else if ((fh->fh_type == FC_TYPE_FCP) &&
-			   (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
+		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
+			cpu = fcoe_select_cpu();
+		else
 			cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
-		} else
-			cpu = smp_processor_id();
 	}
+
+	if (cpu >= nr_cpu_ids)
+		goto err;
+
 	fps = &per_cpu(fcoe_percpu, cpu);
 	spin_lock_bh(&fps->fcoe_rx_list.lock);
 	if (unlikely(!fps->thread)) {
@@ -1572,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 
 	/* send down to lld */
 	fr_dev(fp) = lport;
-	if (port->fcoe_pending_queue.qlen)
-		fcoe_check_wait_queue(lport, skb);
-	else if (fcoe_start_io(skb))
-		fcoe_check_wait_queue(lport, skb);
-
+	fcoe_port_send(port, skb);
 	return 0;
 }
 