author	Vasu Dev <vasu.dev@intel.com>	2009-08-25 16:58:53 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-09-05 10:47:37 -0400
commit	b2f0091fbf8b475fa09b5e1712e0ab84cb3e1ca4 (patch)
tree	98d3427aaae0b3d20d5fd077023b4ea23f5b575c /drivers
parent	e4bc50bedf0dd6c63f20a7bc0a2b46667664fba1 (diff)
[SCSI] fcoe, libfc: fully makes use of per cpu exch pool and then removes em_lock
1. Updates fcoe_rcv() to queue incoming frames to the fcoe per cpu thread on which the frame's exch was originated, and to simply use the current cpu for request exches not originated by the initiator. Guarding this code with CONFIG_SMP is redundant, so the CONFIG_SMP uses around it are removed.

2. Updates fc_exch_em_alloc, fc_exch_delete and fc_exch_find to use per cpu exch pools. Here fc_exch_delete is a rename of the older fc_exch_mgr_delete_ep: since eps/exches are now deleted from the pools of an EM, the briefer new name is sufficient and clearer. Updates these functions to map an exch id to its index into an exch pool using fc_cpu_mask, fc_cpu_order and the EM's min_xid. This mapping follows the detailed explanation in the last patch: the lower fc_cpu_mask bits of the exch id give the cpu number, and the upper bits give the sum of the EM's min_xid and the exch's index in the pool. Uses the pool's next_index to track exch allocation from the pool, with pool_max_index as the upper bound of the exches array in the pool.

3. Adds an exch pool pointer to fc_exch so that fc_exch_delete can free an exch back to its pool.

4. Updates fc_exch_mgr_reset to reset all exch pools of an EM. This required adding fc_exch_pool_reset to reset the exches in a single pool; fc_exch_mgr_reset then calls fc_exch_pool_reset for each pool within each EM of the lport.

5. Removes the now unneeded exches array, em_lock, next_xid and total_exches from struct fc_exch_mgr; none of these are needed once per cpu exch pools are used. Also removes the unused max_read and last_read fields from struct fc_exch_mgr.

6. Updates the locking notes to cover the exch pool lock alongside the fc_exch lock, and takes the pool lock in exch allocation, lookup and reset.

Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
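[Editorial note: the xid mapping described in point 2 can be illustrated with a small stand-alone C sketch, not part of the patch. fc_cpu_order and fc_cpu_mask mirror the definitions introduced by the earlier per cpu pool patch; the helper names and example values are hypothetical, and min_xid is assumed to have its low fc_cpu_order bits clear so the cpu bits survive the addition.]

	#include <stdio.h>

	/* example values: fc_cpu_order low bits select among 4 possible cpus */
	static const unsigned int fc_cpu_order = 2;
	static const unsigned short fc_cpu_mask = (1 << 2) - 1;

	/* allocation side: xid = (pool index << fc_cpu_order | cpu) + min_xid */
	static unsigned short make_xid(unsigned short index, unsigned int cpu,
				       unsigned short min_xid)
	{
		return (index << fc_cpu_order | cpu) + min_xid;
	}

	/* lookup side: the low bits recover the cpu (as in fc_exch_find) ... */
	static unsigned int xid_to_cpu(unsigned short xid)
	{
		return xid & fc_cpu_mask;
	}

	/* ... and the upper bits, less min_xid, recover the pool index */
	static unsigned short xid_to_index(unsigned short xid,
					   unsigned short min_xid)
	{
		return (xid - min_xid) >> fc_cpu_order;
	}

	int main(void)
	{
		unsigned short min_xid = 0x0100;	/* low bits clear */
		unsigned short xid = make_xid(5, 3, min_xid);

		/* prints: xid=0x0117 -> cpu=3 index=5 */
		printf("xid=0x%04x -> cpu=%u index=%u\n", (unsigned)xid,
		       xid_to_cpu(xid), (unsigned)xid_to_index(xid, min_xid));
		return 0;
	}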
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/fcoe/fcoe.c	19
-rw-r--r--	drivers/scsi/libfc/fc_exch.c	189
2 files changed, 109 insertions(+), 99 deletions(-)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 757aa28f0f04..e32a0ed266aa 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -912,8 +912,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct fcoe_softc *fc;
 	struct fc_frame_header *fh;
 	struct fcoe_percpu_s *fps;
-	unsigned short oxid;
-	unsigned int cpu = 0;
+	unsigned int cpu;
 
 	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
 	lp = fc->ctlr.lp;
@@ -947,20 +946,20 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
 	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
-	oxid = ntohs(fh->fh_ox_id);
-
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lp;
 	fr->ptype = ptype;
 
-#ifdef CONFIG_SMP
 	/*
-	 * The incoming frame exchange id(oxid) is ANDed with num of online
-	 * cpu bits to get cpu and then this cpu is used for selecting
-	 * a per cpu kernel thread from fcoe_percpu.
+	 * In case the incoming frame's exchange is originated from
+	 * the initiator, then received frame's exchange id is ANDed
+	 * with fc_cpu_mask bits to get the same cpu on which exchange
+	 * was originated, otherwise just use the current cpu.
 	 */
-	cpu = oxid & (num_online_cpus() - 1);
-#endif
+	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
+		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
+	else
+		cpu = smp_processor_id();
 
 	fps = &per_cpu(fcoe_percpu, cpu);
 	spin_lock_bh(&fps->fcoe_rx_list.lock);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9cbe8d66eb25..b51db15a3876 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -73,14 +73,8 @@ struct fc_exch_pool {
 struct fc_exch_mgr {
 	enum fc_class class;	/* default class for sequences */
 	struct kref kref;	/* exchange mgr reference count */
-	spinlock_t em_lock;	/* exchange manager lock,
-				   must be taken before ex_lock */
-	u16 next_xid;		/* next possible free exchange ID */
 	u16 min_xid;		/* min exchange ID */
 	u16 max_xid;		/* max exchange ID */
-	u16 max_read;		/* max exchange ID for read */
-	u16 last_read;		/* last xid allocated for read */
-	u32 total_exches;	/* total allocated exchanges */
 	struct list_head ex_list;	/* allocated exchanges list */
 	mempool_t *ep_pool;	/* reserve ep's */
 	u16 pool_max_index;	/* max exch array index in exch pool */
@@ -99,7 +93,6 @@ struct fc_exch_mgr {
 		atomic_t seq_not_found;
 		atomic_t non_bls_resp;
 	} stats;
-	struct fc_exch **exches;	/* for exch pointers indexed by xid */
 };
 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
 
@@ -192,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
  * sequence allocation and deallocation must be locked.
  * - exchange refcnt can be done atomicly without locks.
  * - sequence allocation must be locked by exch lock.
- * - If the em_lock and ex_lock must be taken at the same time, then the
- * em_lock must be taken before the ex_lock.
+ * - If the EM pool lock and ex_lock must be taken at the same time, then the
+ * EM pool lock must be taken before the ex_lock.
  */
 
 /*
@@ -335,17 +328,18 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
 	((struct fc_exch **)(pool + 1))[index] = ep;
 }
 
-static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
+static void fc_exch_delete(struct fc_exch *ep)
 {
-	struct fc_exch_mgr *mp;
+	struct fc_exch_pool *pool;
 
-	mp = ep->em;
-	spin_lock_bh(&mp->em_lock);
-	WARN_ON(mp->total_exches <= 0);
-	mp->total_exches--;
-	mp->exches[ep->xid - mp->min_xid] = NULL;
+	pool = ep->pool;
+	spin_lock_bh(&pool->lock);
+	WARN_ON(pool->total_exches <= 0);
+	pool->total_exches--;
+	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
+			NULL);
 	list_del(&ep->ex_list);
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
 }
 
@@ -465,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work)
 		rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
 		if (!rc)
-			fc_exch_mgr_delete_ep(ep);
+			fc_exch_delete(ep);
 		if (resp)
 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
@@ -509,10 +503,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 					struct fc_exch_mgr *mp)
 {
 	struct fc_exch *ep;
-	u16 min, max, xid;
-
-	min = mp->min_xid;
-	max = mp->max_xid;
+	unsigned int cpu;
+	u16 index;
+	struct fc_exch_pool *pool;
 
 	/* allocate memory for exchange */
 	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
@@ -522,15 +515,17 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	}
 	memset(ep, 0, sizeof(*ep));
 
-	spin_lock_bh(&mp->em_lock);
-	xid = mp->next_xid;
-	/* alloc a new xid */
-	while (mp->exches[xid - min]) {
-		xid = (xid == max) ? min : xid + 1;
-		if (xid == mp->next_xid)
+	cpu = smp_processor_id();
+	pool = per_cpu_ptr(mp->pool, cpu);
+	spin_lock_bh(&pool->lock);
+	index = pool->next_index;
+	/* allocate new exch from pool */
+	while (fc_exch_ptr_get(pool, index)) {
+		index = index == mp->pool_max_index ? 0 : index + 1;
+		if (index == pool->next_index)
 			goto err;
 	}
-	mp->next_xid = (xid == max) ? min : xid + 1;
+	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
 
 	fc_exch_hold(ep);	/* hold for exch in mp */
 	spin_lock_init(&ep->ex_lock);
@@ -541,17 +536,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	 */
 	spin_lock_bh(&ep->ex_lock);
 
-	mp->exches[xid - mp->min_xid] = ep;
-	list_add_tail(&ep->ex_list, &mp->ex_list);
+	fc_exch_ptr_set(pool, index, ep);
+	list_add_tail(&ep->ex_list, &pool->ex_list);
 	fc_seq_alloc(ep, ep->seq_id++);
-	mp->total_exches++;
-	spin_unlock_bh(&mp->em_lock);
+	pool->total_exches++;
+	spin_unlock_bh(&pool->lock);
 
 	/*
 	 * update exchange
 	 */
-	ep->oxid = ep->xid = xid;
+	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
 	ep->em = mp;
+	ep->pool = pool;
 	ep->lp = lport;
 	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
 	ep->rxid = FC_XID_UNKNOWN;
@@ -560,7 +556,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 out:
 	return ep;
 err:
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
 	atomic_inc(&mp->stats.no_free_exch_xid);
 	mempool_free(ep, mp->ep_pool);
 	return NULL;
@@ -597,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc);
  */
 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 {
+	struct fc_exch_pool *pool;
 	struct fc_exch *ep = NULL;
 
 	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
-		spin_lock_bh(&mp->em_lock);
-		ep = mp->exches[xid - mp->min_xid];
+		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
+		spin_lock_bh(&pool->lock);
+		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
 		if (ep) {
 			fc_exch_hold(ep);
 			WARN_ON(ep->xid != xid);
 		}
-		spin_unlock_bh(&mp->em_lock);
+		spin_unlock_bh(&pool->lock);
 	}
 	return ep;
 }
@@ -620,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp)
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 }
 EXPORT_SYMBOL(fc_exch_done);
 
@@ -1213,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		WARN_ON(fc_seq_exch(sp) != ep);
 		spin_unlock_bh(&ep->ex_lock);
 		if (!rc)
-			fc_exch_mgr_delete_ep(ep);
+			fc_exch_delete(ep);
 	}
 
 	/*
@@ -1323,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 
 	if (resp)
 		resp(sp, fp, ex_resp_arg);
@@ -1466,48 +1464,76 @@ static void fc_exch_reset(struct fc_exch *ep)
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 
 	if (resp)
 		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
 }
 
-/*
- * Reset an exchange manager, releasing all sequences and exchanges.
- * If sid is non-zero, reset only exchanges we source from that FID.
- * If did is non-zero, reset only exchanges destined to that FID.
+/**
+ * fc_exch_pool_reset() - Resets a per cpu exches pool.
+ * @lport:	ptr to the local port
+ * @pool:	ptr to the per cpu exches pool
+ * @sid:	source FC ID
+ * @did:	destination FC ID
+ *
+ * Resets a per cpu exches pool, releasing all of its sequences
+ * and exchanges. If sid is non-zero, then reset only exchanges
+ * we sourced from that FID. If did is non-zero, reset only
+ * exchanges destined to that FID.
  */
-void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+static void fc_exch_pool_reset(struct fc_lport *lport,
+			       struct fc_exch_pool *pool,
+			       u32 sid, u32 did)
 {
 	struct fc_exch *ep;
 	struct fc_exch *next;
-	struct fc_exch_mgr *mp;
-	struct fc_exch_mgr_anchor *ema;
 
-	list_for_each_entry(ema, &lp->ema_list, ema_list) {
-		mp = ema->mp;
-		spin_lock_bh(&mp->em_lock);
+	spin_lock_bh(&pool->lock);
 restart:
-		list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
-			if ((lp == ep->lp) &&
-			    (sid == 0 || sid == ep->sid) &&
-			    (did == 0 || did == ep->did)) {
-				fc_exch_hold(ep);
-				spin_unlock_bh(&mp->em_lock);
+	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
+		if ((lport == ep->lp) &&
+		    (sid == 0 || sid == ep->sid) &&
+		    (did == 0 || did == ep->did)) {
+			fc_exch_hold(ep);
+			spin_unlock_bh(&pool->lock);
 
-				fc_exch_reset(ep);
+			fc_exch_reset(ep);
 
-				fc_exch_release(ep);
-				spin_lock_bh(&mp->em_lock);
+			fc_exch_release(ep);
+			spin_lock_bh(&pool->lock);
 
-				/*
-				 * must restart loop incase while lock
-				 * was down multiple eps were released.
-				 */
-				goto restart;
-			}
+			/*
+			 * must restart loop incase while lock
+			 * was down multiple eps were released.
+			 */
+			goto restart;
 		}
-		spin_unlock_bh(&mp->em_lock);
+	}
+	spin_unlock_bh(&pool->lock);
+}
+
+/**
+ * fc_exch_mgr_reset() - Resets all EMs of a lport
+ * @lport:	ptr to the local port
+ * @sid:	source FC ID
+ * @did:	destination FC ID
+ *
+ * Reset all EMs of a lport, releasing all of their sequences and
+ * exchanges. If sid is non-zero, then reset only exchanges
+ * we sourced from that FID. If did is non-zero, reset only
+ * exchanges destined to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
+{
+	struct fc_exch_mgr_anchor *ema;
+	unsigned int cpu;
+
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		for_each_possible_cpu(cpu)
+			fc_exch_pool_reset(lport,
+					   per_cpu_ptr(ema->mp->pool, cpu),
+					   sid, did);
 	}
 }
 EXPORT_SYMBOL(fc_exch_mgr_reset);
@@ -1777,11 +1803,6 @@ static void fc_exch_mgr_destroy(struct kref *kref)
 {
 	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
 
-	/*
-	 * The total exch count must be zero
-	 * before freeing exchange manager.
-	 */
-	WARN_ON(mp->total_exches != 0);
 	mempool_destroy(mp->ep_pool);
 	free_percpu(mp->pool);
 	kfree(mp);
@@ -1802,7 +1823,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 				      bool (*match)(struct fc_frame *))
 {
 	struct fc_exch_mgr *mp;
-	size_t len;
 	u16 pool_exch_range;
 	size_t pool_size;
 	unsigned int cpu;
@@ -1816,25 +1836,16 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 	}
 
 	/*
-	 * Memory need for EM
+	 * allocate memory for EM
 	 */
-	len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
-	len += sizeof(struct fc_exch_mgr);
-
-	mp = kzalloc(len, GFP_ATOMIC);
+	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
 	if (!mp)
 		return NULL;
 
 	mp->class = class;
-	mp->total_exches = 0;
-	mp->exches = (struct fc_exch **)(mp + 1);
 	/* adjust em exch xid range for offload */
 	mp->min_xid = min_xid;
 	mp->max_xid = max_xid;
-	mp->next_xid = min_xid;
-
-	INIT_LIST_HEAD(&mp->ex_list);
-	spin_lock_init(&mp->em_lock);
 
 	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
 	if (!mp->ep_pool)
@@ -1944,7 +1955,7 @@ err:
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 	return NULL;
 }
 EXPORT_SYMBOL(fc_exch_seq_send);