author     chas williams - CONTRACTOR <chas@cmf.nrl.navy.mil>   2010-05-29 05:05:33 -0400
committer  David S. Miller <davem@davemloft.net>                2010-05-31 03:27:47 -0400
commit     e623d62512dcb68a1c4844f4d7b5c8f3aff7d0da (patch)
tree       24d697011fcf477eb1781fc89f8e63c5edc735ec /drivers/atm
parent     1d927870e583d19afa17b2062b65e8f74a83b742 (diff)
atm: [he] rewrite buffer handling in receive path
Instead of a fixed list of buffers, use the buffer pool correctly and
keep track of the outstanding buffer indexes using a fixed table.
Resolves the reported HBUF_ERRs -- failures due to a lack of receive buffers.
Signed-off-by: Chas Williams - CONTRACTOR <chas@cmf.nrl.navy.mil>
Signed-off-by: David S. Miller <davem@davemloft.net>
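
To make the scheme in the log message concrete, here is a small standalone C sketch of the same idea: a bitmap records which pool indexes are currently loaned out, a pointer table maps an index back to the buffer it was loaned with, and a hint speeds up the search for the next free slot. All names below (struct pool, pool_give, pool_take, TABLE_SIZE) are hypothetical and exist only for illustration; the driver's actual implementation is in the diff that follows.

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 32                           /* plays the role of RBPL_TABLE_SIZE */

struct buf {
        unsigned long len;
        unsigned char data[64];
};

struct pool {
        uint32_t table[(TABLE_SIZE + 31) / 32]; /* bitmap of loaned indexes */
        struct buf *virt[TABLE_SIZE];           /* index -> buffer pointer */
        unsigned int hint;                      /* next index to try */
};

/* find and claim a free index, starting at the hint and wrapping once
 * (the kernel patch uses find_next_zero_bit()/find_first_zero_bit()) */
static int pool_get_index(struct pool *p)
{
        for (unsigned int n = 0; n < TABLE_SIZE; n++) {
                unsigned int i = (p->hint + n) % TABLE_SIZE;
                if (!(p->table[i / 32] & (UINT32_C(1) << (i % 32)))) {
                        p->table[i / 32] |= UINT32_C(1) << (i % 32); /* like set_bit() */
                        p->hint = i + 1;
                        return (int)i;
                }
        }
        return -1;                              /* every index is outstanding */
}

/* loan a buffer to the (imaginary) hardware; the index is what the
 * hardware later reports back in its receive descriptor */
static int pool_give(struct pool *p, struct buf *b)
{
        int i = pool_get_index(p);
        if (i >= 0)
                p->virt[i] = b;
        return i;
}

/* the hardware reported index i: recover the buffer and release the slot */
static struct buf *pool_take(struct pool *p, int i)
{
        p->table[i / 32] &= ~(UINT32_C(1) << (i % 32)); /* like clear_bit() */
        return p->virt[i];
}

int main(void)
{
        struct pool p = { .hint = 0 };
        struct buf b1 = { .len = 4 }, b2 = { .len = 8 };

        int i1 = pool_give(&p, &b1);
        int i2 = pool_give(&p, &b2);
        printf("loaned indexes %d and %d\n", i1, i2);
        printf("index %d maps back to a buffer of len %lu\n",
               i2, pool_take(&p, i2)->len);
        return 0;
}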
Diffstat (limited to 'drivers/atm')
-rw-r--r--  drivers/atm/he.c | 181
-rw-r--r--  drivers/atm/he.h |  48
2 files changed, 126 insertions, 103 deletions
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c725494e0d41..ea9cbe596a28 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -67,6 +67,7 @@
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
+#include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <asm/io.h>
 #include <asm/byteorder.h>
@@ -778,6 +779,8 @@ he_init_cs_block_rcm(struct he_dev *he_dev)
 static int __devinit
 he_init_group(struct he_dev *he_dev, int group)
 {
+        struct he_buff *heb, *next;
+        dma_addr_t mapping;
         int i;
 
         he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
@@ -786,12 +789,29 @@ he_init_group(struct he_dev *he_dev, int group)
         he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
                   G0_RBPS_BS + (group * 32));
 
+        /* bitmap table */
+        he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
+                                     * sizeof(unsigned long), GFP_KERNEL);
+        if (!he_dev->rbpl_table) {
+                hprintk("unable to allocate rbpl bitmap table\n");
+                return -ENOMEM;
+        }
+        bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
+
+        /* rbpl_virt 64-bit pointers */
+        he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
+                                    * sizeof(struct he_buff *), GFP_KERNEL);
+        if (!he_dev->rbpl_virt) {
+                hprintk("unable to allocate rbpl virt table\n");
+                goto out_free_rbpl_table;
+        }
+
         /* large buffer pool */
         he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
-                                            CONFIG_RBPL_BUFSIZE, 8, 0);
+                                            CONFIG_RBPL_BUFSIZE, 64, 0);
         if (he_dev->rbpl_pool == NULL) {
                 hprintk("unable to create rbpl pool\n");
-                return -ENOMEM;
+                goto out_free_rbpl_virt;
         }
 
         he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
@@ -801,30 +821,29 @@ he_init_group(struct he_dev *he_dev, int group)
                 goto out_destroy_rbpl_pool;
         }
         memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
-        he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
-        if (he_dev->rbpl_virt == NULL) {
-                hprintk("failed to alloc rbpl_virt\n");
-                goto out_free_rbpl_base;
-        }
+
+        INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
 
         for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
-                dma_addr_t dma_handle;
-                void *cpuaddr;
 
-                cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
-                if (cpuaddr == NULL)
-                        goto out_free_rbpl_virt;
+                heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+                if (!heb)
+                        goto out_free_rbpl;
+                heb->mapping = mapping;
+                list_add(&heb->entry, &he_dev->rbpl_outstanding);
 
-                he_dev->rbpl_virt[i].virt = cpuaddr;
-                he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
-                he_dev->rbpl_base[i].phys = dma_handle;
+                set_bit(i, he_dev->rbpl_table);
+                he_dev->rbpl_virt[i] = heb;
+                he_dev->rbpl_hint = i + 1;
+                he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
+                he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
         }
         he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
 
         he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
         he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
                   G0_RBPL_T + (group * 32));
-        he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
+        he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
                   G0_RBPL_BS + (group * 32));
         he_writel(he_dev,
                   RBP_THRESH(CONFIG_RBPL_THRESH) |
@@ -838,7 +857,7 @@ he_init_group(struct he_dev *he_dev, int group)
                 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
         if (he_dev->rbrq_base == NULL) {
                 hprintk("failed to allocate rbrq\n");
-                goto out_free_rbpl_virt;
+                goto out_free_rbpl;
         }
         memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
 
@@ -879,19 +898,19 @@ out_free_rbpq_base:
         pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
                             sizeof(struct he_rbrq), he_dev->rbrq_base,
                             he_dev->rbrq_phys);
-        i = CONFIG_RBPL_SIZE;
-out_free_rbpl_virt:
-        while (i--)
-                pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
-                              he_dev->rbpl_base[i].phys);
-        kfree(he_dev->rbpl_virt);
+out_free_rbpl:
+        list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
+                pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-out_free_rbpl_base:
         pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
                             sizeof(struct he_rbp), he_dev->rbpl_base,
                             he_dev->rbpl_phys);
 out_destroy_rbpl_pool:
         pci_pool_destroy(he_dev->rbpl_pool);
+out_free_rbpl_virt:
+        kfree(he_dev->rbpl_virt);
+out_free_rbpl_table:
+        kfree(he_dev->rbpl_table);
 
         return -ENOMEM;
 }
@@ -1522,9 +1541,10 @@ he_start(struct atm_dev *dev)
 static void
 he_stop(struct he_dev *he_dev)
 {
-        u16 command;
-        u32 gen_cntl_0, reg;
+        struct he_buff *heb, *next;
         struct pci_dev *pci_dev;
+        u32 gen_cntl_0, reg;
+        u16 command;
 
         pci_dev = he_dev->pci_dev;
 
@@ -1565,18 +1585,16 @@ he_stop(struct he_dev *he_dev)
                           he_dev->hsp, he_dev->hsp_phys);
 
         if (he_dev->rbpl_base) {
-                int i;
-
-                for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
-                        void *cpuaddr = he_dev->rbpl_virt[i].virt;
-                        dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
+                list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
+                        pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-                        pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
-                }
                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
                         * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
         }
 
+        kfree(he_dev->rbpl_virt);
+        kfree(he_dev->rbpl_table);
+
         if (he_dev->rbpl_pool)
                 pci_pool_destroy(he_dev->rbpl_pool);
 
@@ -1609,13 +1627,13 @@ static struct he_tpd *
 __alloc_tpd(struct he_dev *he_dev)
 {
         struct he_tpd *tpd;
-        dma_addr_t dma_handle;
+        dma_addr_t mapping;
 
-        tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
+        tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
         if (tpd == NULL)
                 return NULL;
 
-        tpd->status = TPD_ADDR(dma_handle);
+        tpd->status = TPD_ADDR(mapping);
         tpd->reserved = 0;
         tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
         tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
@@ -1644,13 +1662,12 @@ he_service_rbrq(struct he_dev *he_dev, int group)
         struct he_rbrq *rbrq_tail = (struct he_rbrq *)
                                 ((unsigned long)he_dev->rbrq_base |
                                 he_dev->hsp->group[group].rbrq_tail);
-        struct he_rbp *rbp = NULL;
         unsigned cid, lastcid = -1;
-        unsigned buf_len = 0;
         struct sk_buff *skb;
         struct atm_vcc *vcc = NULL;
         struct he_vcc *he_vcc;
-        struct he_iovec *iov;
+        struct he_buff *heb, *next;
+        int i;
         int pdus_assembled = 0;
         int updated = 0;
 
@@ -1670,41 +1687,35 @@ he_service_rbrq(struct he_dev *he_dev, int group)
                         RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
                         RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
 
-                rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
-
-                buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
-                cid = RBRQ_CID(he_dev->rbrq_head);
+                i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
+                heb = he_dev->rbpl_virt[i];
 
+                cid = RBRQ_CID(he_dev->rbrq_head);
                 if (cid != lastcid)
                         vcc = __find_vcc(he_dev, cid);
                 lastcid = cid;
 
-                if (vcc == NULL) {
-                        hprintk("vcc == NULL (cid 0x%x)\n", cid);
-                        if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
-                                rbp->status &= ~RBP_LOANED;
+                if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
+                        hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
+                        if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+                                clear_bit(i, he_dev->rbpl_table);
+                                list_del(&heb->entry);
+                                pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+                        }
 
                         goto next_rbrq_entry;
                 }
 
-                he_vcc = HE_VCC(vcc);
-                if (he_vcc == NULL) {
-                        hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
-                        if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
-                                rbp->status &= ~RBP_LOANED;
-                        goto next_rbrq_entry;
-                }
-
                 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
                         hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
                         atomic_inc(&vcc->stats->rx_drop);
                         goto return_host_buffers;
                 }
 
-                he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
-                he_vcc->iov_tail->iov_len = buf_len;
-                he_vcc->pdu_len += buf_len;
-                ++he_vcc->iov_tail;
+                heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
+                clear_bit(i, he_dev->rbpl_table);
+                list_move_tail(&heb->entry, &he_vcc->buffers);
+                he_vcc->pdu_len += heb->len;
 
                 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
                         lastcid = -1;
@@ -1713,12 +1724,6 @@ he_service_rbrq(struct he_dev *he_dev, int group)
                         goto return_host_buffers;
                 }
 
-#ifdef notdef
-                if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
-                        hprintk("iovec full! cid 0x%x\n", cid);
-                        goto return_host_buffers;
-                }
-#endif
                 if (!RBRQ_END_PDU(he_dev->rbrq_head))
                         goto next_rbrq_entry;
 
@@ -1746,9 +1751,8 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 
                 __net_timestamp(skb);
 
-                for (iov = he_vcc->iov_head; iov < he_vcc->iov_tail; ++iov)
-                        memcpy(skb_put(skb, iov->iov_len),
-                                he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
+                list_for_each_entry(heb, &he_vcc->buffers, entry)
+                        memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
 
                 switch (vcc->qos.aal) {
                 case ATM_AAL0:
@@ -1788,12 +1792,9 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 return_host_buffers:
                 ++pdus_assembled;
 
-                for (iov = he_vcc->iov_head; iov < he_vcc->iov_tail; ++iov) {
-                        rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
-                        rbp->status &= ~RBP_LOANED;
-                }
-
-                he_vcc->iov_tail = he_vcc->iov_head;
+                list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
+                        pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+                INIT_LIST_HEAD(&he_vcc->buffers);
                 he_vcc->pdu_len = 0;
 
 next_rbrq_entry:
@@ -1897,23 +1898,43 @@ next_tbrq_entry:
 static void
 he_service_rbpl(struct he_dev *he_dev, int group)
 {
-        struct he_rbp *newtail;
+        struct he_rbp *new_tail;
         struct he_rbp *rbpl_head;
+        struct he_buff *heb;
+        dma_addr_t mapping;
+        int i;
         int moved = 0;
 
         rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
                                         RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
 
         for (;;) {
-                newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
+                new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
                                                 RBPL_MASK(he_dev->rbpl_tail+1));
 
                 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
-                if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
+                if (new_tail == rbpl_head)
                         break;
 
-                newtail->status |= RBP_LOANED;
-                he_dev->rbpl_tail = newtail;
+                i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
+                if (i > (RBPL_TABLE_SIZE - 1)) {
+                        i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
+                        if (i > (RBPL_TABLE_SIZE - 1))
+                                break;
+                }
+                he_dev->rbpl_hint = i + 1;
+
+                heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+                if (!heb)
+                        break;
+                heb->mapping = mapping;
+                list_add(&heb->entry, &he_dev->rbpl_outstanding);
+                he_dev->rbpl_virt[i] = heb;
+                set_bit(i, he_dev->rbpl_table);
+                new_tail->idx = i << RBP_IDX_OFFSET;
+                new_tail->phys = mapping + offsetof(struct he_buff, data);
+
+                he_dev->rbpl_tail = new_tail;
                 ++moved;
         }
 
@@ -2137,7 +2158,7 @@ he_open(struct atm_vcc *vcc)
                 return -ENOMEM;
         }
 
-        he_vcc->iov_tail = he_vcc->iov_head;
+        INIT_LIST_HEAD(&he_vcc->buffers);
         he_vcc->pdu_len = 0;
         he_vcc->rc_index = -1;
 
diff --git a/drivers/atm/he.h b/drivers/atm/he.h
index 8bf3264e5d00..110a27d2ecfc 100644
--- a/drivers/atm/he.h
+++ b/drivers/atm/he.h
@@ -198,26 +198,33 @@ struct he_hsp {
         } group[HE_NUM_GROUPS];
 };
 
-/* figure 2.9 receive buffer pools */
+/*
+ * figure 2.9 receive buffer pools
+ *
+ * since a virtual address might be more than 32 bits, we store an index
+ * in the virt member of he_rbp. NOTE: the lower six bits in the rbrq
+ * addr member are used for buffer status further limiting us to 26 bits.
+ */
 
 struct he_rbp {
         volatile u32 phys;
-        volatile u32 status;
+        volatile u32 idx;       /* virt */
 };
 
-/* NOTE: it is suggested that virt be the virtual address of the host
-   buffer. on a 64-bit machine, this would not work. Instead, we
-   store the real virtual address in another list, and store an index
-   (and buffer status) in the virt member.
-*/
+#define RBP_IDX_OFFSET 6
+
+/*
+ * the he dma engine will try to hold an extra 16 buffers in its local
+ * caches. and add a couple buffers for safety.
+ */
 
-#define RBP_INDEX_OFF 6
-#define RBP_INDEX(x) (((long)(x) >> RBP_INDEX_OFF) & 0xffff)
-#define RBP_LOANED 0x80000000
-#define RBP_SMALLBUF 0x40000000
+#define RBPL_TABLE_SIZE (CONFIG_RBPL_SIZE + 16 + 2)
 
-struct he_virt {
-        void *virt;
+struct he_buff {
+        struct list_head entry;
+        dma_addr_t mapping;
+        unsigned long len;
+        u8 data[];
 };
 
 #ifdef notyet
@@ -286,10 +293,13 @@ struct he_dev {
         struct he_rbrq *rbrq_base, *rbrq_head;
         int rbrq_peak;
 
+        struct he_buff **rbpl_virt;
+        unsigned long *rbpl_table;
+        unsigned long rbpl_hint;
         struct pci_pool *rbpl_pool;
         dma_addr_t rbpl_phys;
         struct he_rbp *rbpl_base, *rbpl_tail;
-        struct he_virt *rbpl_virt;
+        struct list_head rbpl_outstanding;
         int rbpl_peak;
 
         dma_addr_t tbrq_phys;
@@ -304,20 +314,12 @@ struct he_dev {
         struct he_dev *next;
 };
 
-struct he_iovec
-{
-        u32 iov_base;
-        u32 iov_len;
-};
-
 #define HE_MAXIOV 20
 
 struct he_vcc
 {
-        struct he_iovec iov_head[HE_MAXIOV];
-        struct he_iovec *iov_tail;
+        struct list_head buffers;
         int pdu_len;
-
         int rc_index;
 
         wait_queue_head_t rx_waitq;
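
One detail from the he.h hunk above is easy to miss: struct he_buff keeps the driver's bookkeeping (entry, mapping, len) and the DMA payload in a single pool allocation, and the hardware is handed mapping + offsetof(struct he_buff, data), so it only ever writes into the trailing data[] area. Below is a minimal standalone C sketch of that layout; the names (struct buff, device_addr) are hypothetical, and the "bus address" is faked with the CPU pointer purely for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* same shape as the patch's struct he_buff: metadata first, then a
 * flexible array member that receives the payload */
struct buff {
        void *entry[2];            /* stands in for the struct list_head */
        unsigned long mapping;     /* stands in for the dma_addr_t */
        unsigned long len;
        unsigned char data[];      /* area the device DMAs into */
};

int main(void)
{
        size_t payload = 2048;

        /* one allocation covers metadata + payload, like one pci_pool block */
        struct buff *b = malloc(sizeof(*b) + payload);
        if (!b)
                return 1;

        /* pretend the block's bus address equals its CPU address; the
         * device is told where the payload starts, not where the block starts */
        b->mapping = (unsigned long)(uintptr_t)b;
        unsigned long device_addr = b->mapping + offsetof(struct buff, data);

        printf("block at %#lx, payload handed to device at %#lx (offset %zu)\n",
               b->mapping, device_addr, offsetof(struct buff, data));

        free(b);
        return 0;
}

This is also why the he.c hunk programs G0_RBPL_BS with (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4: the header now lives inside each pool block, so the buffer space advertised to the hardware shrinks by sizeof(struct he_buff).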