author		David S. Miller <davem@sunset.davemloft.net>	2005-12-06 19:38:35 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2005-12-06 19:38:35 -0500
commit		4ebf0ae2618fbbb0d365e5d295a30ccfcb91fe0b
tree		a58dbe174373f0e9614cd1e354825d8957c3de29
parent		dfb4b9dceb35c567a595ae5e9d035cfda044a103
[AF_PACKET]: Convert PACKET_MMAP over to vm_insert_page().
So we can properly use __GFP_COMP and avoid the use of
PG_reserved pages.
With extremely helpful review from Hugh Dickins.
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/packet/af_packet.c | 115 ++++++++++++++++++++++++++++++---------------------------
 1 file changed, 61 insertions(+), 54 deletions(-)
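Some background on the motivation: remap_pfn_range() installs raw PFN mappings that the VM does not reference-count, so every page backing the ring buffer had to be flagged PG_reserved by hand to keep the kernel from treating it as an ordinary managed page. vm_insert_page() instead installs normal, refcounted PTEs; for that to be safe on a high-order block, the block must be allocated with __GFP_COMP so its constituent pages form one compound page and a reference on any of them pins the whole allocation. A condensed sketch of the idiom being removed (hypothetical helper name, distilled from the old code in the diff below):

static int map_buf_reserved(struct vm_area_struct *vma, char *buf,
			    unsigned int order)
{
	struct page *page = virt_to_page(buf);
	struct page *end = page + (1UL << order);

	/* Hide the pages from the VM so the raw PFN mapping is legal. */
	for (; page < end; page++)
		SetPageReserved(page);

	/* One non-refcounted mapping of the whole block. */
	return remap_pfn_range(vma, vma->vm_start,
			       __pa(buf) >> PAGE_SHIFT,
			       PAGE_SIZE << order, vma->vm_page_prot);
}

After the conversion, neither this SetPageReserved() walk at map time nor the matching ClearPageReserved() walk at free time is needed.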
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 499ae3df4a44..3e2462760413 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1587,23 +1587,47 @@ static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
 	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
 }
 
-static void free_pg_vec(char **pg_vec, unsigned order, unsigned len)
+static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
 {
 	int i;
 
-	for (i=0; i<len; i++) {
-		if (pg_vec[i]) {
-			struct page *page, *pend;
-
-			pend = pg_vec_endpage(pg_vec[i], order);
-			for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
-				ClearPageReserved(page);
-			free_pages((unsigned long)pg_vec[i], order);
-		}
+	for (i = 0; i < len; i++) {
+		if (likely(pg_vec[i]))
+			free_pages((unsigned long) pg_vec[i], order);
 	}
 	kfree(pg_vec);
 }
 
+static inline char *alloc_one_pg_vec_page(unsigned long order)
+{
+	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
+					 order);
+}
+
+static char **alloc_pg_vec(struct tpacket_req *req, int order)
+{
+	unsigned int block_nr = req->tp_block_nr;
+	char **pg_vec;
+	int i;
+
+	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
+	if (unlikely(!pg_vec))
+		goto out;
+
+	for (i = 0; i < block_nr; i++) {
+		pg_vec[i] = alloc_one_pg_vec_page(order);
+		if (unlikely(!pg_vec[i]))
+			goto out_free_pgvec;
+	}
+
+out:
+	return pg_vec;
+
+out_free_pgvec:
+	free_pg_vec(pg_vec, order, block_nr);
+	pg_vec = NULL;
+	goto out;
+}
 
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
 {
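Note that the error unwind in alloc_pg_vec() is safe on partial failure only because kzalloc() zero-fills the pointer array: free_pg_vec() visits all block_nr slots and frees just the non-NULL ones. The same all-or-nothing pattern, condensed into a single hypothetical function with an explicit rollback loop in place of the NULL checks:

static char **alloc_block_table(unsigned int n, unsigned int order)
{
	char **tab = kzalloc(n * sizeof(char *), GFP_KERNEL);
	unsigned int i;

	if (!tab)
		return NULL;
	for (i = 0; i < n; i++) {
		tab[i] = (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP |
						   __GFP_ZERO, order);
		if (!tab[i])
			goto undo;
	}
	return tab;

undo:
	while (i--)		/* free only the slots filled so far */
		free_pages((unsigned long) tab[i], order);
	kfree(tab);
	return NULL;
}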
@@ -1617,64 +1641,46 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
 	/* Sanity tests and some calculations */
 
-	if (po->pg_vec)
+	if (unlikely(po->pg_vec))
 		return -EBUSY;
 
-	if ((int)req->tp_block_size <= 0)
+	if (unlikely((int)req->tp_block_size <= 0))
 		return -EINVAL;
-	if (req->tp_block_size&(PAGE_SIZE-1))
+	if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
 		return -EINVAL;
-	if (req->tp_frame_size < TPACKET_HDRLEN)
+	if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
 		return -EINVAL;
-	if (req->tp_frame_size&(TPACKET_ALIGNMENT-1))
+	if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
 		return -EINVAL;
 
 	po->frames_per_block = req->tp_block_size/req->tp_frame_size;
-	if (po->frames_per_block <= 0)
+	if (unlikely(po->frames_per_block <= 0))
 		return -EINVAL;
-	if (po->frames_per_block*req->tp_block_nr != req->tp_frame_nr)
+	if (unlikely((po->frames_per_block * req->tp_block_nr) !=
+		     req->tp_frame_nr))
 		return -EINVAL;
-	/* OK! */
-
-	/* Allocate page vector */
-	while ((PAGE_SIZE<<order) < req->tp_block_size)
-		order++;
 
 	err = -ENOMEM;
-
-	pg_vec = kmalloc(req->tp_block_nr*sizeof(char *), GFP_KERNEL);
-	if (pg_vec == NULL)
+	order = get_order(req->tp_block_size);
+	pg_vec = alloc_pg_vec(req, order);
+	if (unlikely(!pg_vec))
 		goto out;
-	memset(pg_vec, 0, req->tp_block_nr*sizeof(char **));
-
-	for (i=0; i<req->tp_block_nr; i++) {
-		struct page *page, *pend;
-		pg_vec[i] = (char *)__get_free_pages(GFP_KERNEL, order);
-		if (!pg_vec[i])
-			goto out_free_pgvec;
-
-		pend = pg_vec_endpage(pg_vec[i], order);
-		for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
-			SetPageReserved(page);
-	}
-	/* Page vector is allocated */
 
 	l = 0;
-	for (i=0; i<req->tp_block_nr; i++) {
+	for (i = 0; i < req->tp_block_nr; i++) {
 		char *ptr = pg_vec[i];
 		struct tpacket_hdr *header;
 		int k;
 
-		for (k=0; k<po->frames_per_block; k++) {
-
-			header = (struct tpacket_hdr*)ptr;
+		for (k = 0; k < po->frames_per_block; k++) {
+			header = (struct tpacket_hdr *) ptr;
 			header->tp_status = TP_STATUS_KERNEL;
 			ptr += req->tp_frame_size;
 		}
 	}
 	/* Done */
 	} else {
-		if (req->tp_frame_nr)
+		if (unlikely(req->tp_frame_nr))
 			return -EINVAL;
 	}
 
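get_order() computes exactly what the deleted open-coded loop did: the smallest order such that PAGE_SIZE << order covers the requested size. Since tp_block_size has already been validated as positive and page-aligned at this point, the two forms always agree. Written out longhand (illustrative only; the kernel's get_order() is implemented more cleverly, but equivalently):

static inline int naive_get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;	/* e.g. size == 3 * PAGE_SIZE -> order 2 */
}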
@@ -1701,7 +1707,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
 	spin_lock_bh(&sk->sk_receive_queue.lock);
 	pg_vec = XC(po->pg_vec, pg_vec);
-	po->frame_max = req->tp_frame_nr-1;
+	po->frame_max = (req->tp_frame_nr - 1);
 	po->head = 0;
 	po->frame_size = req->tp_frame_size;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -1728,7 +1734,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
 	release_sock(sk);
 
-out_free_pgvec:
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
@@ -1755,17 +1760,19 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
 		goto out;
 
-	atomic_inc(&po->mapped);
 	start = vma->vm_start;
-	err = -EAGAIN;
-	for (i=0; i<po->pg_vec_len; i++) {
-		if (remap_pfn_range(vma, start,
-				    __pa(po->pg_vec[i]) >> PAGE_SHIFT,
-				    po->pg_vec_pages*PAGE_SIZE,
-				    vma->vm_page_prot))
-			goto out;
-		start += po->pg_vec_pages*PAGE_SIZE;
+	for (i = 0; i < po->pg_vec_len; i++) {
+		struct page *page = virt_to_page(po->pg_vec[i]);
+		int pg_num;
+
+		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
+			err = vm_insert_page(vma, start, page);
+			if (unlikely(err))
+				goto out;
+			start += PAGE_SIZE;
+		}
 	}
+	atomic_inc(&po->mapped);
 	vma->vm_ops = &packet_mmap_ops;
 	err = 0;
 
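The mmap path itself becomes the standard vm_insert_page() loop: one call per page, with the real errno propagated to userspace instead of the old blanket -EAGAIN, and po->mapped bumped only after every insertion has succeeded. Factored out as a generic helper (hypothetical name; assumes buf is the start of a __GFP_COMP high-order allocation, as in alloc_one_pg_vec_page() above):

static int insert_buf_pages(struct vm_area_struct *vma, char *buf,
			    unsigned long addr, unsigned int nr_pages)
{
	struct page *page = virt_to_page(buf);
	unsigned int i;
	int err;

	for (i = 0; i < nr_pages; i++, page++, addr += PAGE_SIZE) {
		/* Takes a reference on each inserted page; this is why
		 * the backing block must be a compound page. */
		err = vm_insert_page(vma, addr, page);
		if (err)
			return err;
	}
	return 0;
}

There is nothing to roll back by hand on failure: when the mmap fails, tearing down the vma drops the references that vm_insert_page() took on the pages already inserted.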