-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c    |  8
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c            |  3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h           |  5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      | 93
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c       | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.h       | 10
7 files changed, 77 insertions, 74 deletions
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index cb5b7f7d4d38..b29a4246ef41 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -2219,7 +2219,6 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 	u32 wqe_idx;
 
 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
-		expand = true;	/* CQE cannot be consumed yet */
 		*polled = false;	/* WC cannot be consumed yet */
 	} else {
 		ibwc->status = IB_WC_SUCCESS;
@@ -2227,10 +2226,11 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 		ibwc->qp = &qp->ibqp;
 		ocrdma_update_wc(qp, ibwc, tail);
 		*polled = true;
-		wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
-		if (tail != wqe_idx)
-			expand = true; /* Coalesced CQE can't be consumed yet */
 	}
+	wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+	if (tail != wqe_idx)
+		expand = true; /* Coalesced CQE can't be consumed yet */
+
 	ocrdma_hwq_inc_tail(&qp->sq);
 	return expand;
 }
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 19f1e6c45fb6..ccb119143d20 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -471,9 +471,10 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		if (port_num != port) {
 			ibp = to_iport(ibdev, port_num);
 			ret = check_mkey(ibp, smp, 0);
-			if (ret)
+			if (ret) {
 				ret = IB_MAD_RESULT_FAILURE;
 				goto bail;
+			}
 		}
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca43901ed861..0af216d21f87 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -262,7 +262,10 @@ struct ipoib_ethtool_st {
 	u16 max_coalesced_frames;
 };
 
+struct ipoib_neigh_table;
+
 struct ipoib_neigh_hash {
+	struct ipoib_neigh_table       *ntbl;
 	struct ipoib_neigh __rcu      **buckets;
 	struct rcu_head                 rcu;
 	u32                             mask;
@@ -271,9 +274,9 @@ struct ipoib_neigh_hash {
 
 struct ipoib_neigh_table {
 	struct ipoib_neigh_hash __rcu  *htbl;
-	rwlock_t                        rwlock;
 	atomic_t                        entries;
 	struct completion               flushed;
+	struct completion               deleted;
 };
 
 /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3e2085a3ee47..1e19b5ae7c47 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -546,15 +546,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
+	spin_lock_irqsave(&priv->lock, flags);
 	neigh = ipoib_neigh_alloc(daddr, dev);
 	if (!neigh) {
+		spin_unlock_irqrestore(&priv->lock, flags);
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		return;
 	}
 
-	spin_lock_irqsave(&priv->lock, flags);
-
 	path = __path_find(dev, daddr + 4);
 	if (!path) {
 		path = path_rec_create(dev, daddr + 4);
@@ -863,10 +863,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
 		return;
 
-	write_lock_bh(&ntbl->rwlock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 
 	if (!htbl)
 		goto out_unlock;
@@ -883,16 +883,14 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* was the neigh idle for two GC periods */
 			if (time_after(neigh_obsolete, neigh->alive)) {
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
-									     lockdep_is_held(&ntbl->rwlock)));
+									     lockdep_is_held(&priv->lock)));
 				/* remove from path/mc list */
-				spin_lock_irqsave(&priv->lock, flags);
 				list_del(&neigh->list);
-				spin_unlock_irqrestore(&priv->lock, flags);
 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 			} else {
 				np = &neigh->hnext;
@@ -902,7 +900,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 	}
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
@@ -947,10 +945,8 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	struct ipoib_neigh *neigh;
 	u32 hash_val;
 
-	write_lock_bh(&ntbl->rwlock);
-
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 	if (!htbl) {
 		neigh = NULL;
 		goto out_unlock;
@@ -961,10 +957,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	 */
 	hash_val = ipoib_addr_hash(htbl, daddr);
 	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
-					       lockdep_is_held(&ntbl->rwlock));
+					       lockdep_is_held(&priv->lock));
 	     neigh != NULL;
 	     neigh = rcu_dereference_protected(neigh->hnext,
-					       lockdep_is_held(&ntbl->rwlock))) {
+					       lockdep_is_held(&priv->lock))) {
 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
 			/* found, take one ref on behalf of the caller */
 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
@@ -987,12 +983,11 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	/* put in hash */
 	rcu_assign_pointer(neigh->hnext,
 			   rcu_dereference_protected(htbl->buckets[hash_val],
-						     lockdep_is_held(&ntbl->rwlock)));
+						     lockdep_is_held(&priv->lock)));
 	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
 	atomic_inc(&ntbl->entries);
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
 
 	return neigh;
 }
@@ -1040,35 +1035,29 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
 	struct ipoib_neigh *n;
 	u32 hash_val;
 
-	write_lock_bh(&ntbl->rwlock);
-
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 	if (!htbl)
-		goto out_unlock;
+		return;
 
 	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
 	np = &htbl->buckets[hash_val];
 	for (n = rcu_dereference_protected(*np,
-					   lockdep_is_held(&ntbl->rwlock));
+					   lockdep_is_held(&priv->lock));
 	     n != NULL;
 	     n = rcu_dereference_protected(*np,
-					   lockdep_is_held(&ntbl->rwlock))) {
+					   lockdep_is_held(&priv->lock))) {
 		if (n == neigh) {
 			/* found */
 			rcu_assign_pointer(*np,
 					   rcu_dereference_protected(neigh->hnext,
-								     lockdep_is_held(&ntbl->rwlock)));
+								     lockdep_is_held(&priv->lock)));
 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
-			goto out_unlock;
+			return;
 		} else {
 			np = &n->hnext;
 		}
 	}
-
-out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
-
 }
 
 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
@@ -1080,7 +1069,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
 	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 	ntbl->htbl = NULL;
-	rwlock_init(&ntbl->rwlock);
 	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
 	if (!htbl)
 		return -ENOMEM;
@@ -1095,6 +1083,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 	htbl->mask = (size - 1);
 	htbl->buckets = buckets;
 	ntbl->htbl = htbl;
+	htbl->ntbl = ntbl;
 	atomic_set(&ntbl->entries, 0);
 
 	/* start garbage collection */
@@ -1111,9 +1100,11 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
 					    struct ipoib_neigh_hash,
 					    rcu);
 	struct ipoib_neigh __rcu **buckets = htbl->buckets;
+	struct ipoib_neigh_table *ntbl = htbl->ntbl;
 
 	kfree(buckets);
 	kfree(htbl);
+	complete(&ntbl->deleted);
 }
 
 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
@@ -1125,10 +1116,10 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 	int i;
 
 	/* remove all neigh connected to a given path or mcast */
-	write_lock_bh(&ntbl->rwlock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 
 	if (!htbl)
 		goto out_unlock;
@@ -1138,16 +1129,14 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* delete neighs belong to this parent */
 			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
-									     lockdep_is_held(&ntbl->rwlock)));
+									     lockdep_is_held(&priv->lock)));
 				/* remove from parent list */
-				spin_lock_irqsave(&priv->lock, flags);
 				list_del(&neigh->list);
-				spin_unlock_irqrestore(&priv->lock, flags);
 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 			} else {
 				np = &neigh->hnext;
@@ -1156,7 +1145,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 		}
 	}
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
@@ -1164,37 +1153,44 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
 	struct ipoib_neigh_hash *htbl;
 	unsigned long flags;
-	int i;
+	int i, wait_flushed = 0;
 
-	write_lock_bh(&ntbl->rwlock);
+	init_completion(&priv->ntbl.flushed);
+
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 	if (!htbl)
 		goto out_unlock;
 
+	wait_flushed = atomic_read(&priv->ntbl.entries);
+	if (!wait_flushed)
+		goto free_htbl;
+
 	for (i = 0; i < htbl->size; i++) {
 		struct ipoib_neigh *neigh;
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			rcu_assign_pointer(*np,
 					   rcu_dereference_protected(neigh->hnext,
-								     lockdep_is_held(&ntbl->rwlock)));
+								     lockdep_is_held(&priv->lock)));
 			/* remove from path/mc list */
-			spin_lock_irqsave(&priv->lock, flags);
 			list_del(&neigh->list);
-			spin_unlock_irqrestore(&priv->lock, flags);
 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 		}
 	}
 
+free_htbl:
 	rcu_assign_pointer(ntbl->htbl, NULL);
 	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	if (wait_flushed)
+		wait_for_completion(&priv->ntbl.flushed);
 }
 
 static void ipoib_neigh_hash_uninit(struct net_device *dev)
@@ -1203,7 +1199,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	int stopped;
 
 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
-	init_completion(&priv->ntbl.flushed);
+	init_completion(&priv->ntbl.deleted);
 	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
 	/* Stop GC if called at init fail need to cancel work */
@@ -1211,10 +1207,9 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	if (!stopped)
 		cancel_delayed_work(&priv->neigh_reap_task);
 
-	if (atomic_read(&priv->ntbl.entries)) {
-		ipoib_flush_neighs(priv);
-		wait_for_completion(&priv->ntbl.flushed);
-	}
+	ipoib_flush_neighs(priv);
+
+	wait_for_completion(&priv->ntbl.deleted);
 }
 
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 13f4aa7593c8..75367249f447 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -707,9 +707,7 @@ out:
 	neigh = ipoib_neigh_get(dev, daddr);
 	spin_lock_irqsave(&priv->lock, flags);
 	if (!neigh) {
-		spin_unlock_irqrestore(&priv->lock, flags);
 		neigh = ipoib_neigh_alloc(daddr, dev);
-		spin_lock_irqsave(&priv->lock, flags);
 		if (neigh) {
 			kref_get(&mcast->ah->ref);
 			neigh->ah = mcast->ah;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index daf417923661..31d02649be41 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -227,9 +227,10 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
-	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+	u32 i = (obj & (table->num_obj - 1)) /
+			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
 	int ret = 0;
 
 	mutex_lock(&table->mutex);
@@ -262,16 +263,18 @@ out:
 	return ret;
 }
 
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
-	int i;
+	u32 i;
+	u64 offset;
 
 	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
 
 	mutex_lock(&table->mutex);
 
 	if (--table->icm[i]->refcount == 0) {
-		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
+		mlx4_UNMAP_ICM(dev, table->virt + offset,
 			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
 		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
@@ -280,9 +283,11 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	mutex_unlock(&table->mutex);
 }
 
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
+void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
+			dma_addr_t *dma_handle)
 {
-	int idx, offset, dma_offset, i;
+	int offset, dma_offset, i;
+	u64 idx;
 	struct mlx4_icm_chunk *chunk;
 	struct mlx4_icm *icm;
 	struct page *page = NULL;
@@ -292,7 +297,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
 
 	mutex_lock(&table->mutex);
 
-	idx = (obj & (table->num_obj - 1)) * table->obj_size;
+	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
 	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
 	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
 
@@ -326,10 +331,11 @@ out:
 }
 
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			 int start, int end)
+			 u32 start, u32 end)
 {
 	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
-	int i, err;
+	int err;
+	u32 i;
 
 	for (i = start; i <= end; i += inc) {
 		err = mlx4_table_get(dev, table, i);
@@ -349,9 +355,9 @@ fail:
 }
 
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			  int start, int end)
+			  u32 start, u32 end)
 {
-	int i;
+	u32 i;
 
 	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
 		mlx4_table_put(dev, table, i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index a67744f53506..dee67fa39107 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,17 +71,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 				gfp_t gfp_mask, int coherent);
 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			 int start, int end);
+			 u32 start, u32 end);
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			  int start, int end);
+			  u32 start, u32 end);
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			u64 virt, int obj_size, u32 nobj, int reserved,
 			int use_lowmem, int use_coherent);
 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
+void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
 
 static inline void mlx4_icm_first(struct mlx4_icm *icm,
 				  struct mlx4_icm_iter *iter)