author		Rajesh Borundia <rajesh.borundia@qlogic.com>	2013-03-29 01:46:38 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-29 15:51:06 -0400
commit		e8b508ef71fb70ec761086532716b19d3c4773e5 (patch)
tree		22e27a6c8369ffff72375ead0a120bbf16b9f9a0 /drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
parent		7cb03b2347d5edace4fb8e7dd9d6c3889368a179 (diff)
qlcnic: Support atomic commands
o VFs might get scheduled out after sending a command to a PF and
  scheduled in after receiving a response. Implement a worker thread
  to handle atomic commands.

Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: Rajesh Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
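For context, the mechanism this patch leans on is the standard Linux workqueue deferral idiom: a request that arrives in a context that must not sleep (the multicast-list update runs under netif_addr_lock_bh, which is also why the allocation in the diff uses GFP_ATOMIC) is recorded and queued to a dedicated workqueue, and the actual back-channel command is issued later from process context, where waiting for the PF's response is safe. Below is a minimal, self-contained sketch of that idiom only; the demo_* names are illustrative and are not part of the driver.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

/* Illustrative state; the driver keeps the equivalent in
 * struct qlcnic_back_channel and struct qlcnic_async_work_list.
 */
struct demo_async {
	struct workqueue_struct *wq;	/* dedicated single-threaded queue */
	struct work_struct work;
	void *data;			/* e.g. the net_device to refresh */
};

/* Runs in process context, so it may sleep while the command
 * travels to the PF and the response comes back.
 */
static void demo_work_fn(struct work_struct *work)
{
	struct demo_async *da = container_of(work, struct demo_async, work);

	/* ...send the back-channel command using da->data... */
	pr_info("demo: refreshing %p from process context\n", da->data);
}

/* Safe to call from atomic context: queue_work() never sleeps,
 * the heavy lifting happens later on the workqueue.
 */
static void demo_schedule(struct demo_async *da, void *data)
{
	da->data = data;
	queue_work(da->wq, &da->work);
}

static int demo_init(struct demo_async *da)
{
	da->wq = create_singlethread_workqueue("demo_async");
	if (!da->wq)
		return -ENOMEM;
	INIT_WORK(&da->work, demo_work_fn);
	return 0;
}

static void demo_cleanup(struct demo_async *da)
{
	cancel_work_sync(&da->work);	/* wait out any in-flight command */
	destroy_workqueue(da->wq);
}

The patch applies the same idea, but keeps a list of work entries (bc->async_list) so that requests arriving while earlier ones are still pending each get their own entry, and entries whose work is no longer pending are reused; qlcnic_sriov_cleanup_async_list() then plays the role of the cancel_work_sync()/destroy step during teardown.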
Diffstat (limited to 'drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c')
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c	130
1 file changed, 129 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6e927f2eb3c7..14e9ebd3b73a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -141,6 +141,16 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 
 	bc->bc_trans_wq = wq;
 
+	wq = create_singlethread_workqueue("async");
+	if (wq == NULL) {
+		err = -ENOMEM;
+		dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+		goto qlcnic_destroy_trans_wq;
+	}
+
+	bc->bc_async_wq = wq;
+	INIT_LIST_HEAD(&bc->async_list);
+
 	for (i = 0; i < num_vfs; i++) {
 		vf = &sriov->vf_info[i];
 		vf->adapter = adapter;
@@ -156,7 +166,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 		vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
 		if (!vp) {
 			err = -ENOMEM;
-			goto qlcnic_destroy_trans_wq;
+			goto qlcnic_destroy_async_wq;
 		}
 		sriov->vf_info[i].vp = vp;
 		random_ether_addr(vp->mac);
@@ -168,6 +178,9 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 
 	return 0;
 
+qlcnic_destroy_async_wq:
+	destroy_workqueue(bc->bc_async_wq);
+
 qlcnic_destroy_trans_wq:
 	destroy_workqueue(bc->bc_trans_wq);
 
@@ -188,6 +201,8 @@ void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 	if (!qlcnic_sriov_enable_check(adapter))
 		return;
 
+	qlcnic_sriov_cleanup_async_list(bc);
+	destroy_workqueue(bc->bc_async_wq);
 	destroy_workqueue(bc->bc_trans_wq);
 
 	for (i = 0; i < sriov->num_vfs; i++)
@@ -351,6 +366,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 {
 	int err;
 
+	INIT_LIST_HEAD(&adapter->vf_mc_list);
 	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
 		dev_warn(&adapter->pdev->dev,
 			 "83xx adapter do not support MSI interrupts\n");
@@ -1167,3 +1183,115 @@ out:
 	qlcnic_free_mbx_args(&cmd);
 	return ret;
 }
+
+void qlcnic_vf_add_mc_list(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_mac_list_s *cur;
+	struct list_head *head, tmp_list;
+
+	INIT_LIST_HEAD(&tmp_list);
+	head = &adapter->vf_mc_list;
+	netif_addr_lock_bh(netdev);
+
+	while (!list_empty(head)) {
+		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+		list_move(&cur->list, &tmp_list);
+	}
+
+	netif_addr_unlock_bh(netdev);
+
+	while (!list_empty(&tmp_list)) {
+		cur = list_entry((&tmp_list)->next,
+				 struct qlcnic_mac_list_s, list);
+		qlcnic_nic_add_mac(adapter, cur->mac_addr);
+		list_del(&cur->list);
+		kfree(cur);
+	}
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+	struct list_head *head = &bc->async_list;
+	struct qlcnic_async_work_list *entry;
+
+	while (!list_empty(head)) {
+		entry = list_entry(head->next, struct qlcnic_async_work_list,
+				   list);
+		cancel_work_sync(&entry->work);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+}
+
+static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+		return;
+
+	__qlcnic_set_multi(netdev);
+}
+
+static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+{
+	struct qlcnic_async_work_list *entry;
+	struct net_device *netdev;
+
+	entry = container_of(work, struct qlcnic_async_work_list, work);
+	netdev = (struct net_device *)entry->ptr;
+
+	qlcnic_sriov_vf_set_multi(netdev);
+	return;
+}
+
+static struct qlcnic_async_work_list *
+qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+{
+	struct list_head *node;
+	struct qlcnic_async_work_list *entry = NULL;
+	u8 empty = 0;
+
+	list_for_each(node, &bc->async_list) {
+		entry = list_entry(node, struct qlcnic_async_work_list, list);
+		if (!work_pending(&entry->work)) {
+			empty = 1;
+			break;
+		}
+	}
+
+	if (!empty) {
+		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
+				GFP_ATOMIC);
+		if (entry == NULL)
+			return NULL;
+		list_add_tail(&entry->list, &bc->async_list);
+	}
+
+	return entry;
+}
+
+static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
+						work_func_t func, void *data)
+{
+	struct qlcnic_async_work_list *entry = NULL;
+
+	entry = qlcnic_sriov_get_free_node_async_work(bc);
+	if (!entry)
+		return;
+
+	entry->ptr = data;
+	INIT_WORK(&entry->work, func);
+	queue_work(bc->bc_async_wq, &entry->work);
+}
+
+void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+{
+
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+	qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
+					    netdev);
+}