Diffstat (limited to 'drivers/net/vxge/vxge-main.c')
-rw-r--r--  drivers/net/vxge/vxge-main.c  245
1 file changed, 129 insertions, 116 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c30..b504bd561362 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		ring->ndev->name, __func__, __LINE__);
 	ring->pkts_processed = 0;
 
-	vxge_hw_ring_replenish(ringh, 0);
+	vxge_hw_ring_replenish(ringh);
 
 	do {
 		prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
  */
 static void vxge_set_multicast(struct net_device *dev)
 {
-	struct dev_mc_list *mclist;
+	struct netdev_hw_addr *ha;
 	struct vxgedev *vdev;
 	int i, mcast_cnt = 0;
 	struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
 	}
 
 	/* Add new ones */
-	netdev_for_each_mc_addr(mclist, dev) {
-		memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
+	netdev_for_each_mc_addr(ha, dev) {
+		memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
 		for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
 			vpath_idx++) {
 			mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
 	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
-	int msix_id, alarm_msix_id;
-	int tim_msix_id[4] = {[0 ...3] = 0};
+	int msix_id = 0;
+	int tim_msix_id[4] = {0, 1, 0, 0};
+	int alarm_msix_id = VXGE_ALARM_MSIX_ID;
 
 	vxge_hw_vpath_intr_enable(vpath->handle);
 
 	if (vdev->config.intr_type == INTA)
 		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
 	else {
-		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
-		alarm_msix_id =
-			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-
-		tim_msix_id[0] = msix_id;
-		tim_msix_id[1] = msix_id + 1;
 		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
 			alarm_msix_id);
 
+		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
 		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
 		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
 
 		/* enable the alarm vector */
-		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
 	}
 }
 
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 	if (vdev->config.intr_type == INTA)
 		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
 	else {
-		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
 
 		/* disable the alarm vector */
-		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
 	}
 }
@@ -1765,7 +1764,6 @@ static void vxge_netpoll(struct net_device *dev)
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d Exiting...", __func__, __LINE__);
-	return;
 }
 #endif
 
@@ -2224,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 	enum vxge_hw_status status;
 	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
 	struct vxgedev *vdev = vpath->vdev;
-	int alarm_msix_id =
-		VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+	int msix_id = (vpath->handle->vpath->vp_id *
+		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
-			alarm_msix_id);
+		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
 
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 			vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
 
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-				alarm_msix_id);
+				msix_id);
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 static int vxge_alloc_msix(struct vxgedev *vdev)
 {
 	int j, i, ret = 0;
-	int intr_cnt = 0;
-	int alarm_msix_id = 0, msix_intr_vect = 0;
+	int msix_intr_vect = 0, temp;
 	vdev->intr_cnt = 0;
 
+start:
 	/* Tx/Rx MSIX Vectors count */
 	vdev->intr_cnt = vdev->no_of_vpath * 2;
 
 	/* Alarm MSIX Vectors count */
 	vdev->intr_cnt++;
 
-	intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-	vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+	vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
 		GFP_KERNEL);
 	if (!vdev->entries) {
 		vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2265,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		return -ENOMEM;
 	}
 
-	vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
-		GFP_KERNEL);
+	vdev->vxge_entries =
+		kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
+			GFP_KERNEL);
 	if (!vdev->vxge_entries) {
 		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
 			VXGE_DRIVER_NAME);
@@ -2278,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		return -ENOMEM;
 	}
 
-	/* Last vector in the list is used for alarm */
-	alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-	for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
 
 		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
 
@@ -2298,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 	}
 
 	/* Initialize the alarm vector */
-	vdev->entries[j].entry = alarm_msix_id;
-	vdev->vxge_entries[j].entry = alarm_msix_id;
+	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
 	vdev->vxge_entries[j].in_use = 0;
 
-	ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
-	/* if driver request exceeeds available irq's, request with a small
-	 * number.
-	 */
-	if (ret > 0) {
-		vxge_debug_init(VXGE_ERR,
-			"%s: MSI-X enable failed for %d vectors, available: %d",
-			VXGE_DRIVER_NAME, intr_cnt, ret);
-		vdev->max_vpath_supported = vdev->no_of_vpath;
-		intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-
-		/* Reset the alarm vector setting */
-		vdev->entries[j].entry = 0;
-		vdev->vxge_entries[j].entry = 0;
-
-		/* Initialize the alarm vector with new setting */
-		vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
-		vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
-		vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-
-		ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
-		if (!ret)
-			vxge_debug_init(VXGE_ERR,
-				"%s: MSI-X enabled for %d vectors",
-				VXGE_DRIVER_NAME, intr_cnt);
-	}
+	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
 
-	if (ret) {
+	if (ret > 0) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: MSI-X enable failed for %d vectors, ret: %d",
-			VXGE_DRIVER_NAME, intr_cnt, ret);
+			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
 		kfree(vdev->entries);
 		kfree(vdev->vxge_entries);
 		vdev->entries = NULL;
 		vdev->vxge_entries = NULL;
+
+		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
+			return -ENODEV;
+		/* Try with less no of vector by reducing no of vpaths count */
+		temp = (ret - 1)/2;
+		vxge_close_vpaths(vdev, temp);
+		vdev->no_of_vpath = temp;
+		goto start;
+	} else if (ret < 0)
 		return -ENODEV;
-	}
+
 	return 0;
 }
 
@@ -2346,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 {
 
 	int i, ret = 0;
-	enum vxge_hw_status status;
 	/* 0 - Tx, 1 - Rx */
-	int tim_msix_id[4];
-	int alarm_msix_id = 0, msix_intr_vect = 0;
+	int tim_msix_id[4] = {0, 1, 0, 0};
+
 	vdev->intr_cnt = 0;
 
 	/* allocate msix vectors */
 	ret = vxge_alloc_msix(vdev);
 	if (!ret) {
-		/* Last vector in the list is used for alarm */
-		alarm_msix_id =
-			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
 		for (i = 0; i < vdev->no_of_vpath; i++) {
 
 			/* If fifo or ring are not enabled
 			   the MSIX vector for that should be set to 0
 			   Hence initializeing this array to all 0s.
 			*/
-			memset(tim_msix_id, 0, sizeof(tim_msix_id));
-			msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-			tim_msix_id[0] = msix_intr_vect;
-
-			tim_msix_id[1] = msix_intr_vect + 1;
-			vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
+			vdev->vpaths[i].ring.rx_vector_no =
+				(vdev->vpaths[i].device_id *
+				VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
-			status = vxge_hw_vpath_msix_set(
-				vdev->vpaths[i].handle,
-				tim_msix_id, alarm_msix_id);
-			if (status != VXGE_HW_OK) {
-				vxge_debug_init(VXGE_ERR,
-					"vxge_hw_vpath_msix_set "
-					"failed with status : %x", status);
-				kfree(vdev->entries);
-				kfree(vdev->vxge_entries);
-				pci_disable_msix(vdev->pdev);
-				return -ENODEV;
-			}
+			vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+				tim_msix_id, VXGE_ALARM_MSIX_ID);
 		}
 	}
 
@@ -2393,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 {
 	int intr_cnt;
 
-	for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
 		intr_cnt++) {
 		if (vdev->vxge_entries[intr_cnt].in_use) {
 			synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			switch (msix_idx) {
 			case 0:
 				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-					"%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
-					vdev->ndev->name, pci_fun, vp_idx,
-					vdev->entries[intr_cnt].entry);
+					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+					vdev->ndev->name,
+					vdev->entries[intr_cnt].entry,
+					pci_fun, vp_idx);
 				ret = request_irq(
 					vdev->entries[intr_cnt].vector,
 					vxge_tx_msix_handle, 0,
@@ -2472,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
 				break;
 			case 1:
 				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-					"%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
-					vdev->ndev->name, pci_fun, vp_idx,
-					vdev->entries[intr_cnt].entry);
+					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+					vdev->ndev->name,
+					vdev->entries[intr_cnt].entry,
+					pci_fun, vp_idx);
 				ret = request_irq(
 					vdev->entries[intr_cnt].vector,
 					vxge_rx_msix_napi_handle,
@@ -2502,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			if (irq_req) {
 				/* We requested for this msix interrupt */
 				vdev->vxge_entries[intr_cnt].in_use = 1;
+				msix_idx += vdev->vpaths[vp_idx].device_id *
+					VXGE_HW_VPATH_MSIX_ACTIVE;
 				vxge_hw_vpath_msix_unmask(
 					vdev->vpaths[vp_idx].handle,
-					intr_idx);
+					msix_idx);
 				intr_cnt++;
 			}
 
@@ -2514,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			vp_idx++;
 		}
 
-		intr_cnt = vdev->max_vpath_supported * 2;
+		intr_cnt = vdev->no_of_vpath * 2;
 		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-			"%s:vxge Alarm fn: %d MSI-X: %d",
-			vdev->ndev->name, pci_fun,
-			vdev->entries[intr_cnt].entry);
+			"%s:vxge:MSI-X %d - Alarm - fn:%d",
+			vdev->ndev->name,
+			vdev->entries[intr_cnt].entry,
+			pci_fun);
 		/* For Alarm interrupts */
 		ret = request_irq(vdev->entries[intr_cnt].vector,
 			vxge_alarm_msix_handle, 0,
 			vdev->desc[intr_cnt],
-			&vdev->vpaths[vp_idx]);
+			&vdev->vpaths[0]);
 		if (ret) {
 			vxge_debug_init(VXGE_ERR,
 				"%s: MSIX - %d Registration failed",
@@ -2536,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			goto INTA_MODE;
 		}
 
+		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
-			intr_idx - 2);
+			msix_idx);
 		vdev->vxge_entries[intr_cnt].in_use = 1;
-		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
 	}
 INTA_MODE:
 #endif
-	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
 
 	if (vdev->config.intr_type == INTA) {
+		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+			"%s:vxge:INTA", vdev->ndev->name);
 		vxge_hw_device_set_intr_type(vdev->devh,
 			VXGE_HW_INTR_MODE_IRQLINE);
 		vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -2844,7 +2814,6 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
 		for (i = 0; i < vdev->no_of_vpath; i++)
 			netif_napi_del(&vdev->vpaths[i].ring.napi);
 	}
-	return;
 }
 
 int do_vxge_close(struct net_device *dev, int do_io)
@@ -3529,8 +3498,6 @@ static void verify_bandwidth(void)
 		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
 			bw_percentage[i] = bw_percentage[0];
 	}
-
-	return;
 }
 
 /*
@@ -3995,6 +3962,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
 		netif_device_attach(netdev);
 }
 
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+	u32 num_functions = 0;
+
+	switch (function_mode) {
+	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+		num_functions = 8;
+		break;
+	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+		num_functions = 1;
+		break;
+	case VXGE_HW_FUNCTION_MODE_SRIOV:
+	case VXGE_HW_FUNCTION_MODE_MRIOV:
+	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+		num_functions = 17;
+		break;
+	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+		num_functions = 4;
+		break;
+	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+		num_functions = 2;
+		break;
+	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+		num_functions = 8; /* TODO */
+		break;
+	}
+	return num_functions;
+}
+
 /**
  * vxge_probe
  * @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4019,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 	u8 *macaddr;
 	struct vxge_mac_addrs *entry;
 	static int bus = -1, device = -1;
+	u32 host_type;
 	u8 new_device = 0;
+	enum vxge_hw_status is_privileged;
+	u32 function_mode;
+	u32 num_vfs = 0;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 	attr.pdev = pdev;
 
-	if (bus != pdev->bus->number)
-		new_device = 1;
-	if (device != PCI_SLOT(pdev->devfn))
+	/* In SRIOV-17 mode, functions of the same adapter
+	 * can be deployed on different buses */
+	if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
+		(device != PCI_SLOT(pdev->devfn))))
 		new_device = 1;
 
 	bus = pdev->bus->number;
@@ -4046,9 +4048,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 			driver_config->total_dev_cnt);
 		driver_config->config_dev_cnt = 0;
 		driver_config->total_dev_cnt = 0;
-		driver_config->g_no_cpus = 0;
 	}
-
+	/* Now making the CPU based no of vpath calculation
+	 * applicable for individual functions as well.
+	 */
+	driver_config->g_no_cpus = 0;
 	driver_config->vpath_per_dev = max_config_vpath;
 
 	driver_config->total_dev_cnt++;
@@ -4161,6 +4165,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		"%s:%d Vpath mask = %llx", __func__, __LINE__,
 		(unsigned long long)vpath_mask);
 
+	function_mode = ll_config.device_hw_info.function_mode;
+	host_type = ll_config.device_hw_info.host_type;
+	is_privileged = __vxge_hw_device_is_privilaged(host_type,
+		ll_config.device_hw_info.func_id);
+
 	/* Check how many vpaths are available */
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 		if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4177,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		max_vpath_supported++;
 	}
 
+	if (new_device)
+		num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
 	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
-	if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
-		ll_config.device_hw_info.function_mode) &&
-		(max_config_dev > 1) && (pdev->is_physfn)) {
-		ret = pci_enable_sriov(pdev, max_config_dev - 1);
-		if (ret)
-			vxge_debug_ll_config(VXGE_ERR,
-				"Failed to enable SRIOV: %d \n", ret);
+	if (is_sriov(function_mode) && (max_config_dev > 1) &&
+		(ll_config.intr_type != INTA) &&
+		(is_privileged == VXGE_HW_OK)) {
+		ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
+			? (max_config_dev - 1) : num_vfs);
+		if (ret)
+			vxge_debug_ll_config(VXGE_ERR,
+				"Failed in enabling SRIOV mode: %d\n", ret);
 	}
 
 	/*