Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_driver.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 467816043d10..68fc9b5a4ad8 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -755,8 +755,8 @@ static void get_rhf_errstring(u32 err, char *msg, size_t len)
 static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
				      int err)
 {
-	return dd->ipath_port0_skbs ?
-		(void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
+	return dd->ipath_port0_skbinfo ?
+		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
 }
 
 /**
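
This hunk moves the port-0 eager receive buffers from a plain array of struct sk_buff pointers to an ipath_port0_skbinfo array whose elements carry a .skb member and, as the free path later in this patch shows, a .phys DMA handle. The structure itself is declared outside this file and is not part of the diff; a minimal sketch consistent with the fields used here would be:

/* Sketch only: the real declaration lives elsewhere in the ipath driver
 * headers.  Fields are inferred from the .skb and .phys accesses in this
 * patch (kernel context, so no includes shown). */
struct ipath_skbinfo {
	struct sk_buff *skb;	/* receive buffer for this eager entry */
	dma_addr_t phys;	/* bus address recorded at map time */
};

Pairing each skb with its bus address is what lets the teardown code further down unmap the buffer before freeing it.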
@@ -778,31 +778,34 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
 	 */
 
 	/*
-	 * We need 4 extra bytes for unaligned transfer copying
+	 * We need 2 extra bytes for ipath_ether data sent in the
+	 * key header. In order to keep everything dword aligned,
+	 * we'll reserve 4 bytes.
 	 */
+	len = dd->ipath_ibmaxlen + 4;
+
 	if (dd->ipath_flags & IPATH_4BYTE_TID) {
-		/* we need a 4KB multiple alignment, and there is no way
+		/* We need a 2KB multiple alignment, and there is no way
 		 * to do it except to allocate extra and then skb_reserve
 		 * enough to bring it up to the right alignment.
 		 */
-		len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
+		len += 2047;
 	}
-	else
-		len = dd->ipath_ibmaxlen + 4;
+
 	skb = __dev_alloc_skb(len, gfp_mask);
 	if (!skb) {
 		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
 			      len);
 		goto bail;
 	}
+
+	skb_reserve(skb, 4);
+
 	if (dd->ipath_flags & IPATH_4BYTE_TID) {
-		u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
+		u32 una = (unsigned long)skb->data & 2047;
 		if (una)
-			skb_reserve(skb, 4 + (1 << 11) - una);
-		else
-			skb_reserve(skb, 4);
-	} else
-		skb_reserve(skb, 4);
+			skb_reserve(skb, 2048 - una);
+	}
 
 bail:
 	return skb;
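
The rewritten ipath_alloc_skb() now always reserves 4 bytes up front (2 bytes for the ipath_ether key header, padded to a dword), and in the IPATH_4BYTE_TID case over-allocates by 2047 bytes so the data pointer can then be rounded up to the next 2 KB boundary. A standalone sketch of that alignment arithmetic, assuming the same 2048-byte boundary as the hunk:

/* Standalone sketch of the alignment math used above (not driver code):
 * over-allocating by ALIGN - 1 bytes always leaves enough slack to round
 * a pointer up to the next ALIGN boundary; here ALIGN is 2048, matching
 * the IPATH_4BYTE_TID case. */
static inline unsigned long align_up_2k(unsigned long addr)
{
	unsigned long una = addr & 2047;	/* bytes past the last 2 KB boundary */

	return una ? addr + (2048 - una) : addr;	/* adds at most 2047 bytes */
}

The worst case (una == 1) consumes 2047 extra bytes, which is exactly the slack added by len += 2047.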
@@ -1345,8 +1348,9 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
 			   "hdrtailaddr@%p %llx physical\n",
 			   pd->port_port, pd->port_rcvhdrq,
-			   pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
-			   (unsigned long long)pd->port_rcvhdrqtailaddr_phys);
+			   (unsigned long long) pd->port_rcvhdrq_phys,
+			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
+			   pd->port_rcvhdrqtailaddr_phys);
 
 		/* clear for security and sanity on each use */
 		memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
@@ -1827,17 +1831,22 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
 		kfree(pd->port_rcvegrbuf_phys);
 		pd->port_rcvegrbuf_phys = NULL;
 		pd->port_rcvegrbuf_chunks = 0;
-	} else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
+	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
 		unsigned e;
-		struct sk_buff **skbs = dd->ipath_port0_skbs;
+		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
 
-		dd->ipath_port0_skbs = NULL;
-		ipath_cdbg(VERBOSE, "free closed port %d ipath_port0_skbs "
-			   "@ %p\n", pd->port_port, skbs);
+		dd->ipath_port0_skbinfo = NULL;
+		ipath_cdbg(VERBOSE, "free closed port %d "
+			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
+			   skbinfo);
 		for (e = 0; e < dd->ipath_rcvegrcnt; e++)
-			if (skbs[e])
-				dev_kfree_skb(skbs[e]);
-		vfree(skbs);
+			if (skbinfo[e].skb) {
+				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
+						 dd->ipath_ibmaxlen,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(skbinfo[e].skb);
+			}
+		vfree(skbinfo);
 	}
 	kfree(pd->port_tid_pg_list);
 	vfree(pd->subport_uregbase);
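
With the eager buffers now DMA-mapped, teardown has to unmap each buffer with pci_unmap_single(), using the stored bus address and the same length and direction as the original mapping, before returning the skb with dev_kfree_skb(). The matching map call belongs to the allocation path, which is outside this hunk; a hedged sketch of what that pairing presumably looks like, written as a hypothetical helper (the name ipath_setup_port0_skb is illustrative only):

/* Sketch only: the map side is not part of this diff.  Assumes the same
 * struct ipath_skbinfo and dd->ipath_ibmaxlen used in the free path above. */
static int ipath_setup_port0_skb(struct ipath_devdata *dd,
				 struct ipath_skbinfo *si)
{
	si->skb = ipath_alloc_skb(dd, GFP_KERNEL);
	if (!si->skb)
		return -ENOMEM;
	si->phys = pci_map_single(dd->pcidev, si->skb->data,
				  dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
	return 0;
}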
@@ -1934,7 +1943,7 @@ static void cleanup_device(struct ipath_devdata *dd)
 
 	if (dd->ipath_pioavailregs_dma) {
 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-				  dd->ipath_pioavailregs_dma,
+				  (void *) dd->ipath_pioavailregs_dma,
 				  dd->ipath_pioavailregs_phys);
 		dd->ipath_pioavailregs_dma = NULL;
 	}
@@ -1947,6 +1956,7 @@ static void cleanup_device(struct ipath_devdata *dd)
 
 	if (dd->ipath_pageshadow) {
 		struct page **tmpp = dd->ipath_pageshadow;
+		dma_addr_t *tmpd = dd->ipath_physshadow;
 		int i, cnt = 0;
 
 		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
@@ -1957,6 +1967,8 @@ static void cleanup_device(struct ipath_devdata *dd)
 		for (i = port_tidbase; i < maxtid; i++) {
 			if (!tmpp[i])
 				continue;
+			pci_unmap_page(dd->pcidev, tmpd[i],
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
 			ipath_release_user_pages(&tmpp[i], 1);
 			tmpp[i] = NULL;
 			cnt++;
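
The same unmap-before-release pattern applies to the expected-TID pages: the new dma_addr_t shadow array (ipath_physshadow, introduced in the previous hunk) lets cleanup_device() unmap each page before unpinning it. The bus addresses it consumes would be recorded when the user page is pinned and mapped, which is not shown in this diff; a hedged sketch of that setup side, where 'page' and the index i are placeholders for the pinned user page and its expTID slot:

/* Sketch only: the map side is not in this diff.  'page' stands for the
 * pinned user page being programmed into an expTID entry; index i matches
 * the pageshadow/physshadow slot that the cleanup loop above frees. */
dd->ipath_pageshadow[i] = page;
dd->ipath_physshadow[i] = pci_map_page(dd->pcidev, page, 0,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);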