path: root/drivers/net/cnic.c
author	Michael Chan <mchan@broadcom.com>	2009-08-14 11:49:47 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-15 21:50:47 -0400
commit	7fc1ece40704b150477e548a7a98d285cc418790 (patch)
tree	fd7f72c1b92ddde327fb72235a256539ac193b8c /drivers/net/cnic.c
parent	681dbd710779e8b8d5bae926f6b11f30df70638b (diff)
cnic: Fix locking in init/exit calls.
The slow-path ulp_init and ulp_exit calls into the bnx2i driver are sleepable and therefore should not be protected by rcu_read_lock. Fix this by using a mutex and a reference count around these calls. cnic_unregister_driver() will now wait for the reference count to go to zero before completing the call.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Reviewed-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
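The locking pattern introduced by this patch can be seen in isolation in the short userspace C sketch below. It is an illustrative analogue, not cnic code: a pthread mutex and C11 atomics stand in for the kernel mutex and atomic_t, and the names my_ulp_ops, ops_tbl, call_init and unregister_ops are invented for the example. A mutex protects the table lookup, a get/put reference count pins the ops structure so the sleepable callback can run with no lock held, and unregister waits a bounded time (20 x 100 ms, mirroring the patch) for the count to drain.

/*
 * Illustrative userspace sketch (not kernel code): names such as
 * my_ulp_ops, ops_tbl, call_init and unregister_ops are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct my_ulp_ops {
	void (*init)(void);		/* sleepable callback */
	atomic_int ref_count;
};

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct my_ulp_ops *ops_tbl;	/* single-slot stand-in for cnic_ulp_tbl[] */

static void ulp_get(struct my_ulp_ops *ops)
{
	atomic_fetch_add(&ops->ref_count, 1);
}

static void ulp_put(struct my_ulp_ops *ops)
{
	atomic_fetch_sub(&ops->ref_count, 1);
}

static void call_init(void)
{
	struct my_ulp_ops *ops;

	pthread_mutex_lock(&tbl_lock);
	ops = ops_tbl;
	if (!ops || !ops->init) {
		pthread_mutex_unlock(&tbl_lock);
		return;
	}
	ulp_get(ops);			/* pin ops before dropping the lock */
	pthread_mutex_unlock(&tbl_lock);

	ops->init();			/* may sleep; no lock held */

	ulp_put(ops);
}

static int unregister_ops(struct my_ulp_ops *ops)
{
	int i = 0;

	pthread_mutex_lock(&tbl_lock);
	ops_tbl = NULL;			/* new callers can no longer find ops */
	pthread_mutex_unlock(&tbl_lock);

	/* bounded wait (20 x 100 ms) for in-flight callbacks to drop their ref */
	while (atomic_load(&ops->ref_count) != 0 && i++ < 20)
		usleep(100 * 1000);

	if (atomic_load(&ops->ref_count) != 0) {
		fprintf(stderr, "unregister: ref count did not reach zero\n");
		return -1;
	}
	return 0;
}

static void demo_init(void)
{
	puts("init callback ran without any lock held");
}

int main(void)
{
	static struct my_ulp_ops ops = { .init = demo_init };

	pthread_mutex_lock(&tbl_lock);
	ops_tbl = &ops;			/* register */
	pthread_mutex_unlock(&tbl_lock);

	call_init();
	return unregister_ops(&ops);
}

The bounded poll is a deliberate trade-off: unregister normally completes quickly, but a callback that never drops its reference only delays it by roughly two seconds and produces a warning instead of hanging forever.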
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r--	drivers/net/cnic.c	48
1 file changed, 39 insertions, 9 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4ff618a7afd2..74c342959b7b 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 	return NULL;
 }
 
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 	}
 	read_unlock(&cnic_dev_lock);
 
+	atomic_set(&ulp_ops->ref_count, 0);
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 	mutex_unlock(&cnic_lock);
 
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 int cnic_unregister_driver(int ulp_type)
 {
 	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (!cnic_ulp_tbl[ulp_type]) {
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	if (!ulp_ops) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
 		       "been registered\n", ulp_type);
 		goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
 
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+		       " to zero.\n", dev->netdev->name);
 	return 0;
 
 out_unlock:
@@ -1161,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_init)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_init(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1181,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_exit)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_exit(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_cm_offload_pg(struct cnic_sock *csk)