author     Bryan O'Sullivan <bos@pathscale.com>    2006-07-01 07:36:03 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-07-01 12:56:00 -0400
commit     f37bda92461313ad3bbfbf5660adc849c69718bf
tree       a5fe4737ca6b8fcbe2cf9b58466d6340ee12fe56  /drivers/infiniband/hw/ipath/ipath_driver.c
parent     06993ca6bc46419027b45198a58447f4f05c14f6
[PATCH] IB/ipath: memory management cleanups
Make the in-memory rcvhdrq tail update live in dma_alloc'ed memory rather than
in arbitrary user or special kernel memory (needed for ppc, and also just the
right thing to do).

Some cleanups to make unexpected link transitions less likely to produce
complaints about packet errors, and to avoid leaving SMA packets stuck and
unable to go out.

A few other random debug and comment cleanups.

Always initialize the rcvhdrq head/tail registers to 0, to avoid race
conditions (this should have been done some time ago).
Signed-off-by: Dave Olson <dave.olson@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
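
The core of the patch is the switch from a single, refcounted global buffer to a per-port page obtained with dma_alloc_coherent(), into which the chip DMAs the rcvhdrq tail. A minimal sketch of that allocation pattern, using hypothetical demo_* names rather than the driver's own:

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/string.h>

/* hypothetical per-port state; the real driver keeps the equivalent
 * fields in struct ipath_portdata */
struct demo_port {
        volatile __le64 *hdrqtail_kvaddr;  /* CPU-visible tail copy */
        dma_addr_t hdrqtail_phys;          /* bus address given to the chip */
};

/* allocate one DMA-coherent page for the chip to write the tail into;
 * coherent memory is valid behind an IOMMU or in a restricted DMA range,
 * which is the point of the change vs. random user/kernel memory */
static int demo_alloc_hdrqtail(struct device *dev, struct demo_port *port)
{
        port->hdrqtail_kvaddr = dma_alloc_coherent(dev, PAGE_SIZE,
                                                   &port->hdrqtail_phys,
                                                   GFP_KERNEL);
        if (!port->hdrqtail_kvaddr)
                return -ENOMEM;
        /* clear for security and sanity, matching the patch */
        memset((void *)port->hdrqtail_kvaddr, 0, PAGE_SIZE);
        return 0;
}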
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_driver.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_driver.c	225
1 file changed, 92 insertions, 133 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index c92f8e0a117..0b88642381f 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -131,14 +131,6 @@ static struct pci_driver ipath_driver = {
 	.id_table = ipath_pci_tbl,
 };
 
-/*
- * This is where port 0's rcvhdrtail register is written back; we also
- * want nothing else sharing the cache line, so make it a cache line
- * in size.  Used for all units.
- */
-volatile __le64 *ipath_port0_rcvhdrtail;
-dma_addr_t ipath_port0_rcvhdrtail_dma;
-static int port0_rcvhdrtail_refs;
 
 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
 			     u32 *bar0, u32 *bar1)
@@ -268,47 +260,6 @@ int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
 	return nunits;
 }
 
-static int init_port0_rcvhdrtail(struct pci_dev *pdev)
-{
-	int ret;
-
-	mutex_lock(&ipath_mutex);
-
-	if (!ipath_port0_rcvhdrtail) {
-		ipath_port0_rcvhdrtail =
-			dma_alloc_coherent(&pdev->dev,
-					   IPATH_PORT0_RCVHDRTAIL_SIZE,
-					   &ipath_port0_rcvhdrtail_dma,
-					   GFP_KERNEL);
-
-		if (!ipath_port0_rcvhdrtail) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-	}
-	port0_rcvhdrtail_refs++;
-	ret = 0;
-
-bail:
-	mutex_unlock(&ipath_mutex);
-
-	return ret;
-}
-
-static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
-{
-	mutex_lock(&ipath_mutex);
-
-	if (!--port0_rcvhdrtail_refs) {
-		dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
-				  (void *) ipath_port0_rcvhdrtail,
-				  ipath_port0_rcvhdrtail_dma);
-		ipath_port0_rcvhdrtail = NULL;
-	}
-
-	mutex_unlock(&ipath_mutex);
-}
-
 /*
  * These next two routines are placeholders in case we don't have per-arch
  * code for controlling write combining.  If explicit control of write
@@ -333,20 +284,12 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	u32 bar0 = 0, bar1 = 0;
 	u8 rev;
 
-	ret = init_port0_rcvhdrtail(pdev);
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Could not allocate port0_rcvhdrtail: error %d\n",
-		       -ret);
-		goto bail;
-	}
-
 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
 		ret = PTR_ERR(dd);
 		printk(KERN_ERR IPATH_DRV_NAME
 		       ": Could not allocate devdata: error %d\n", -ret);
-		goto bail_rcvhdrtail;
+		goto bail;
 	}
 
 	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
@@ -574,9 +517,6 @@ bail_disable:
 bail_devdata:
 	ipath_free_devdata(pdev, dd);
 
-bail_rcvhdrtail:
-	cleanup_port0_rcvhdrtail(pdev);
-
 bail:
 	return ret;
 }
@@ -608,7 +548,6 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 
 	ipath_free_devdata(pdev, dd);
-	cleanup_port0_rcvhdrtail(pdev);
 }
 
 /* general driver use */
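
The first five hunks delete the old machinery: a mutex-guarded, refcounted global (init_port0_rcvhdrtail()/cleanup_port0_rcvhdrtail()) and every error-path label that existed only to unwind it. Once the buffer is allocated per port in ipath_create_rcvhdrq(), its lifetime simply follows the port, and probe/remove shrink accordingly. A hedged sketch of the resulting probe shape, with illustrative demo_* names:

#include <linux/pci.h>

/* with per-port coherent allocations done at port-init time, probe no
 * longer takes a global mutex or bumps a refcount on a shared buffer;
 * it only sets up the device itself and unwinds its own work on failure */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;     /* nothing global to unwind */

        /* ... per-device setup; port buffers are allocated later,
         * when each port is initialized ... */
        return 0;
}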
@@ -1383,26 +1322,20 @@ bail:
  * @dd: the infinipath device
  * @pd: the port data
  *
- * this *must* be physically contiguous memory, and for now,
- * that limits it to what kmalloc can do.
+ * this must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
  */
 int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 			 struct ipath_portdata *pd)
 {
-	int ret = 0, amt;
+	int ret = 0;
 
-	amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-		    sizeof(u32), PAGE_SIZE);
 	if (!pd->port_rcvhdrq) {
-		/*
-		 * not using REPEAT isn't viable; at 128KB, we can easily
-		 * fail this.  The problem with REPEAT is we can block here
-		 * "forever".  There isn't an inbetween, unfortunately.  We
-		 * could reduce the risk by never freeing the rcvhdrq except
-		 * at unload, but even then, the first time a port is used,
-		 * we could delay for some time...
-		 */
+		dma_addr_t phys_hdrqtail;
 		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+				sizeof(u32), PAGE_SIZE);
 
 		pd->port_rcvhdrq = dma_alloc_coherent(
 			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
@@ -1415,6 +1348,16 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 			ret = -ENOMEM;
 			goto bail;
 		}
+		pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
+			&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
+		if (!pd->port_rcvhdrtail_kvaddr) {
+			ipath_dev_err(dd, "attempt to allocate 1 page "
+				      "for port %u rcvhdrqtailaddr failed\n",
+				      pd->port_port);
+			ret = -ENOMEM;
+			goto bail;
+		}
+		pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
 
 		pd->port_rcvhdrq_size = amt;
 
@@ -1424,20 +1367,28 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 			   (unsigned long) pd->port_rcvhdrq_phys,
 			   (unsigned long) pd->port_rcvhdrq_size,
 			   pd->port_port);
-	} else {
-		/*
-		 * clear for security, sanity, and/or debugging, each
-		 * time we reuse
-		 */
-		memset(pd->port_rcvhdrq, 0, amt);
+
+		ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
+			   pd->port_port,
+			   (unsigned long long) phys_hdrqtail);
 	}
+	else
+		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
+			   "hdrtailaddr@%p %llx physical\n",
+			   pd->port_port, pd->port_rcvhdrq,
+			   pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
+			   (unsigned long long)pd->port_rcvhdrqtailaddr_phys);
+
+	/* clear for security and sanity on each use */
+	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
+	memset((void *)pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
 
 	/*
 	 * tell chip each time we init it, even if we are re-using previous
-	 * memory (we zero it at process close)
+	 * memory (we zero the register at process close)
 	 */
-	ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
-		   pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
+	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
 	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
 			      pd->port_port, pd->port_rcvhdrq_phys);
 
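
With the tail page in hand, ipath_create_rcvhdrq() now programs two chip registers on every (re)init: the bus address of the header queue and the bus address of the tail page. Re-writing them each time is deliberate, since the registers are zeroed when a port closes. As a generic sketch of that pattern (the offsets and the helper are hypothetical, not real ipath registers; the driver itself uses ipath_write_kreg_port()):

#include <linux/io.h>
#include <linux/types.h>

/* write the per-port queue and tail bus addresses into MMIO registers;
 * writeq is available on platforms that provide 64-bit MMIO accessors */
static void demo_set_queue_addrs(void __iomem *regs, unsigned port,
                                 u64 hdrq_bus, u64 tail_bus)
{
        /* offsets are illustrative only */
        writeq(hdrq_bus, regs + 0x100 + port * 16);
        writeq(tail_bus, regs + 0x108 + port * 16);
}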
@@ -1525,15 +1476,27 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
 		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
 		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
 	};
+	int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
+			INFINIPATH_IBCC_LINKCMD_MASK;
+
 	ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
 		   "is %s\n", dd->ipath_unit,
-		   what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
-			INFINIPATH_IBCC_LINKCMD_MASK],
+		   what[linkcmd],
 		   ipath_ibcstatus_str[
 			   (ipath_read_kreg64
 			    (dd, dd->ipath_kregs->kr_ibcstatus) >>
 			    INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
 			   INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
+	/* flush all queued sends when going to DOWN or INIT, to be sure that
+	 * they don't block SMA and other MAD packets */
+	if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 INFINIPATH_S_ABORT);
+		ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+				     (unsigned)(dd->ipath_piobcnt2k +
+				     dd->ipath_piobcnt4k) -
+				     dd->ipath_lastport_piobuf);
+	}
 
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
 			 dd->ipath_ibcctrl | which);
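
The link-state hunk decodes the requested link command once, then flushes queued sends whenever the link is commanded DOWN or back to INIT, so stale packets cannot wedge the send buffers and block SMA/MAD traffic. The decode step reduced to a self-contained sketch (the DOWN=0/INIT=1 encoding here mirrors the !linkcmd test in the hunk, but the names are stand-ins):

#include <linux/types.h>

/* returns nonzero if the requested link command should flush queued
 * sends first; DOWN is encoded as 0 and INIT as 1 in this sketch */
static inline int demo_linkcmd_needs_flush(u32 which, u32 shift, u32 mask)
{
        u32 linkcmd = (which >> shift) & mask;

        return linkcmd == 0 /* DOWN */ || linkcmd == 1 /* INIT */;
}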
@@ -1681,60 +1644,54 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 /**
  * ipath_free_pddata - free a port's allocated data
  * @dd: the infinipath device
- * @port: the port
- * @freehdrq: free the port data structure if true
+ * @pd: the portdata structure
  *
- * when closing, free up any allocated data for a port, if the
- * reference count goes to zero
- * Note: this also optionally frees the portdata itself!
- * Any changes here have to be matched up with the reinit case
- * of ipath_init_chip(), which calls this routine on reinit after reset.
+ * free up any allocated data for a port
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of port data, because it is called after ipath_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ * (The only exception to global state is freeing the port0 port0_skbs.)
  */
-void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
+void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
 {
-	struct ipath_portdata *pd = dd->ipath_pd[port];
-
 	if (!pd)
 		return;
-	if (freehdrq)
-		/*
-		 * only clear and free portdata if we are going to also
-		 * release the hdrq, otherwise we leak the hdrq on each
-		 * open/close cycle
-		 */
-		dd->ipath_pd[port] = NULL;
-	if (freehdrq && pd->port_rcvhdrq) {
+
+	if (pd->port_rcvhdrq) {
 		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
 			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
 			   (unsigned long) pd->port_rcvhdrq_size);
 		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
 				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
 		pd->port_rcvhdrq = NULL;
+		if (pd->port_rcvhdrtail_kvaddr) {
+			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+					  (void *)pd->port_rcvhdrtail_kvaddr,
+					  pd->port_rcvhdrqtailaddr_phys);
+			pd->port_rcvhdrtail_kvaddr = NULL;
+		}
 	}
-	if (port && pd->port_rcvegrbuf) {
-		/* always free this */
-		if (pd->port_rcvegrbuf) {
-			unsigned e;
-
-			for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-				void *base = pd->port_rcvegrbuf[e];
-				size_t size = pd->port_rcvegrbuf_size;
-
-				ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
-					   "chunk %u/%u\n", base,
-					   (unsigned long) size,
-					   e, pd->port_rcvegrbuf_chunks);
-				dma_free_coherent(
-					&dd->pcidev->dev, size, base,
-					pd->port_rcvegrbuf_phys[e]);
-			}
-			vfree(pd->port_rcvegrbuf);
-			pd->port_rcvegrbuf = NULL;
-			vfree(pd->port_rcvegrbuf_phys);
-			pd->port_rcvegrbuf_phys = NULL;
+	if (pd->port_port && pd->port_rcvegrbuf) {
+		unsigned e;
+
+		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
+			void *base = pd->port_rcvegrbuf[e];
+			size_t size = pd->port_rcvegrbuf_size;
+
+			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
+				   "chunk %u/%u\n", base,
+				   (unsigned long) size,
+				   e, pd->port_rcvegrbuf_chunks);
+			dma_free_coherent(&dd->pcidev->dev, size,
+					  base, pd->port_rcvegrbuf_phys[e]);
 		}
+		vfree(pd->port_rcvegrbuf);
+		pd->port_rcvegrbuf = NULL;
+		vfree(pd->port_rcvegrbuf_phys);
+		pd->port_rcvegrbuf_phys = NULL;
 		pd->port_rcvegrbuf_chunks = 0;
-	} else if (port == 0 && dd->ipath_port0_skbs) {
+	} else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
 		unsigned e;
 		struct sk_buff **skbs = dd->ipath_port0_skbs;
 
@@ -1746,10 +1703,8 @@ void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
 			dev_kfree_skb(skbs[e]);
 		vfree(skbs);
 	}
-	if (freehdrq) {
-		kfree(pd->port_tid_pg_list);
-		kfree(pd);
-	}
+	kfree(pd->port_tid_pg_list);
+	kfree(pd);
 }
 
 static int __init infinipath_init(void)
1755 | static int __init infinipath_init(void) | 1710 | static int __init infinipath_init(void) |
@@ -1874,10 +1829,14 @@ static void cleanup_device(struct ipath_devdata *dd) | |||
1874 | 1829 | ||
1875 | /* | 1830 | /* |
1876 | * free any resources still in use (usually just kernel ports) | 1831 | * free any resources still in use (usually just kernel ports) |
1877 | * at unload | 1832 | * at unload; we do for portcnt, not cfgports, because cfgports |
1833 | * could have changed while we were loaded. | ||
1878 | */ | 1834 | */ |
1879 | for (port = 0; port < dd->ipath_cfgports; port++) | 1835 | for (port = 0; port < dd->ipath_portcnt; port++) { |
1880 | ipath_free_pddata(dd, port, 1); | 1836 | struct ipath_portdata *pd = dd->ipath_pd[port]; |
1837 | dd->ipath_pd[port] = NULL; | ||
1838 | ipath_free_pddata(dd, pd); | ||
1839 | } | ||
1881 | kfree(dd->ipath_pd); | 1840 | kfree(dd->ipath_pd); |
1882 | /* | 1841 | /* |
1883 | * debuggability, in case some cleanup path tries to use it | 1842 | * debuggability, in case some cleanup path tries to use it |
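
The last two hunks change the calling convention of ipath_free_pddata(): it loses the port and freehdrq arguments, the caller detaches the portdata pointer from the device (dd->ipath_pd[port] = NULL) before handing it in, and the function then frees everything unconditionally without touching chip or global state. A sketch of that ownership-transfer convention, with hypothetical demo_* types:

#include <linux/slab.h>

struct demo_portdata {
        void *rcvhdrq;          /* plus DMA handles, skb lists, ... */
};

struct demo_devdata {
        struct demo_portdata *pd[8];    /* illustrative fixed size */
};

/* callee owns pd outright; it never reads dd->pd[] and never touches
 * chip state, so a concurrent re-allocation of the slot stays safe */
static void demo_free_pddata(struct demo_portdata *pd)
{
        if (!pd)
                return;
        kfree(pd->rcvhdrq);     /* stand-in for the dma_free_coherent calls */
        kfree(pd);
}

/* caller detaches first, then frees */
static void demo_cleanup_port(struct demo_devdata *dd, unsigned port)
{
        struct demo_portdata *pd = dd->pd[port];

        dd->pd[port] = NULL;
        demo_free_pddata(pd);
}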