path: root/net
author	Florian Westphal <fwestphal@astaro.com>	2010-01-05 10:09:46 -0500
committer	Florian Westphal <fw@strlen.de>	2010-02-16 11:27:19 -0500
commit	81e675c227ec60a0bdcbb547dc530ebee23ff931 (patch)
tree	9fa0dc7b261ed3de8a87dfc0e9d4794c70e2b379 /net
parent	49facff9f92508413f3da598f86aaf6c1121ff27 (diff)
netfilter: ebtables: add CONFIG_COMPAT support
Main code to support 32 bit userland ebtables binaries on a 64 bit kernel. Tested on an x86_64 kernel only, using a 64 bit ebtables binary for output comparison. At least ebt_mark, m_mark and ebt_limit need CONFIG_COMPAT hooks, too.

Remaining problem: the ebtables userland makefile has

ifeq ($(shell uname -m),sparc64)
CFLAGS+=-DEBT_MIN_ALIGN=8 -DKERNEL_64_USERSPACE_32
endif

struct ebt_replace, ebt_entry_match etc. then contain userland-side padding, i.e. even if we are called from a 32 bit userland, the structures may already be in the right format. This problem is addressed in a follow-up patch.

Signed-off-by: Florian Westphal <fwestphal@astaro.com>
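For illustration, a minimal userspace-only sketch (not kernel code; NAMELEN, NUMHOOKS and the replace_32/replace_64 structs are stand-ins, not the kernel definitions) of why translation is needed at all: the same logical ebt_replace header ends up with a different size and different field offsets once the embedded user pointers shrink from 8 bytes to 4.

/* Compile with an ordinary 64 bit userspace compiler; the two structs
 * approximate what a 32 bit ebtables binary and a 64 bit kernel see. */
#include <stdint.h>
#include <stdio.h>

#define NAMELEN  32   /* stand-in for EBT_TABLE_MAXNAMELEN */
#define NUMHOOKS  6   /* stand-in for NF_BR_NUMHOOKS */

struct replace_32 {                     /* 32 bit userland layout */
	char name[NAMELEN];
	uint32_t valid_hooks, nentries, entries_size;
	uint32_t hook_entry[NUMHOOKS];  /* compat_uptr_t: 4 byte user pointers */
	uint32_t num_counters;
	uint32_t counters, entries;
};

struct replace_64 {                     /* native 64 bit kernel layout */
	char name[NAMELEN];
	unsigned int valid_hooks, nentries, entries_size;
	void *hook_entry[NUMHOOKS];     /* real pointers: 8 bytes each */
	unsigned int num_counters;
	void *counters, *entries;
};

int main(void)
{
	printf("32 bit layout: %zu bytes\n", sizeof(struct replace_32));
	printf("64 bit layout: %zu bytes\n", sizeof(struct replace_64));
	return 0;
}

The compat_ebt_replace and compat_ebt_entry_mwt structures added in the patch below pin the 32 bit layout down explicitly (compat_uint_t, compat_uptr_t), so the kernel can convert at the sockopt boundary.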
Diffstat (limited to 'net')
-rw-r--r--	net/bridge/netfilter/ebtables.c	887
1 file changed, 886 insertions(+), 1 deletion(-)
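As the commit message notes, extensions such as ebt_mark, m_mark and ebt_limit still need their own CONFIG_COMPAT hooks; this patch only wires them up for the standard target. As a hypothetical sketch (the example_tg names and struct layout are invented for illustration, not the actual follow-up patches), such a hook pair would look roughly like this:

#ifdef CONFIG_COMPAT
/* hypothetical native layout of an extension's private data */
struct example_tg_info {
	unsigned long value;
	unsigned int verdict;
};

/* the same data as a 32 bit userland binary lays it out */
struct compat_example_tg_info {
	compat_ulong_t value;
	compat_uint_t verdict;
};

static void example_tg_compat_from_user(void *dst, const void *src)
{
	const struct compat_example_tg_info *cinfo = src;
	struct example_tg_info *info = dst;

	info->value = cinfo->value;
	info->verdict = cinfo->verdict;
}

static int example_tg_compat_to_user(void __user *dst, const void *src)
{
	const struct example_tg_info *info = src;
	struct compat_example_tg_info cinfo = {
		.value = info->value,
		.verdict = info->verdict,
	};

	return copy_to_user(dst, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
}
#endif

/* wired up in the extension's xt_target, next to .targetsize: */
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(struct compat_example_tg_info),
	.compat_from_user = example_tg_compat_from_user,
	.compat_to_user = example_tg_compat_to_user,
#endif

The pattern mirrors the ebt_standard_target change below: describe the fixed 32 bit layout of the private data, convert it in both directions, and advertise the 32 bit size via .compatsize.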
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 76b99d3c1eea..fcaefdd6200b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -51,11 +51,37 @@
51
52static DEFINE_MUTEX(ebt_mutex);
53
54#ifdef CONFIG_COMPAT
55static void ebt_standard_compat_from_user(void *dst, const void *src)
56{
57 int v = *(compat_int_t *)src;
58
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
62}
63
64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65{
66 compat_int_t cv = *(int *)src;
67
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71}
72#endif
73
74
75static struct xt_target ebt_standard_target = {
76 .name = "standard",
77 .revision = 0,
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
80#ifdef CONFIG_COMPAT
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
84#endif
85};
86
87static inline int
@@ -1454,7 +1480,7 @@ static int do_ebt_set_ctl(struct sock *sk,
1480 break;
1481 default:
1482 ret = -EINVAL;
1483 }
1484 return ret;
1485}
1486
@@ -1514,15 +1540,874 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1540 return ret;
1541}
1542
1543#ifdef CONFIG_COMPAT
1544/* 32 bit-userspace compatibility definitions. */
1545struct compat_ebt_replace {
1546 char name[EBT_TABLE_MAXNAMELEN];
1547 compat_uint_t valid_hooks;
1548 compat_uint_t nentries;
1549 compat_uint_t entries_size;
1550 /* start of the chains */
1551 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1552 /* nr of counters userspace expects back */
1553 compat_uint_t num_counters;
1554 /* where the kernel will put the old counters. */
1555 compat_uptr_t counters;
1556 compat_uptr_t entries;
1557};
1558
1559/* struct ebt_entry_match, _target and _watcher have same layout */
1560struct compat_ebt_entry_mwt {
1561 union {
1562 char name[EBT_FUNCTION_MAXNAMELEN];
1563 compat_uptr_t ptr;
1564 } u;
1565 compat_uint_t match_size;
1566 compat_uint_t data[0];
1567};
1568
1569/* account for possible padding between match_size and ->data */
1570static int ebt_compat_entry_padsize(void)
1571{
1572 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1573 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1574 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1575 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1576}
1577
1578static int ebt_compat_match_offset(const struct xt_match *match,
1579 unsigned int userlen)
1580{
1581 /*
1582 * ebt_among needs special handling. The kernel .matchsize is
1583 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1584 * value is expected.
1585 * Example: userspace sends 4500, ebt_among.c wants 4504.
1586 */
1587 if (unlikely(match->matchsize == -1))
1588 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1589 return xt_compat_match_offset(match);
1590}
1591
1592static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1593 unsigned int *size)
1594{
1595 const struct xt_match *match = m->u.match;
1596 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1597 int off = ebt_compat_match_offset(match, m->match_size);
1598 compat_uint_t msize = m->match_size - off;
1599
1600 BUG_ON(off >= m->match_size);
1601
1602 if (copy_to_user(cm->u.name, match->name,
1603 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1604 return -EFAULT;
1605
1606 if (match->compat_to_user) {
1607 if (match->compat_to_user(cm->data, m->data))
1608 return -EFAULT;
1609 } else if (copy_to_user(cm->data, m->data, msize))
1610 return -EFAULT;
1611
1612 *size -= ebt_compat_entry_padsize() + off;
1613 *dstptr = cm->data;
1614 *dstptr += msize;
1615 return 0;
1616}
1617
1618static int compat_target_to_user(struct ebt_entry_target *t,
1619 void __user **dstptr,
1620 unsigned int *size)
1621{
1622 const struct xt_target *target = t->u.target;
1623 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1624 int off = xt_compat_target_offset(target);
1625 compat_uint_t tsize = t->target_size - off;
1626
1627 BUG_ON(off >= t->target_size);
1628
1629 if (copy_to_user(cm->u.name, target->name,
1630 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1631 return -EFAULT;
1632
1633 if (target->compat_to_user) {
1634 if (target->compat_to_user(cm->data, t->data))
1635 return -EFAULT;
1636 } else if (copy_to_user(cm->data, t->data, tsize))
1637 return -EFAULT;
1638
1639 *size -= ebt_compat_entry_padsize() + off;
1640 *dstptr = cm->data;
1641 *dstptr += tsize;
1642 return 0;
1643}
1644
1645static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1646 void __user **dstptr,
1647 unsigned int *size)
1648{
1649 return compat_target_to_user((struct ebt_entry_target *)w,
1650 dstptr, size);
1651}
1652
1653static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1654 unsigned int *size)
1655{
1656 struct ebt_entry_target *t;
1657 struct ebt_entry __user *ce;
1658 u32 watchers_offset, target_offset, next_offset;
1659 compat_uint_t origsize;
1660 int ret;
1661
1662 if (e->bitmask == 0) {
1663 if (*size < sizeof(struct ebt_entries))
1664 return -EINVAL;
1665 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1666 return -EFAULT;
1667
1668 *dstptr += sizeof(struct ebt_entries);
1669 *size -= sizeof(struct ebt_entries);
1670 return 0;
1671 }
1672
1673 if (*size < sizeof(*ce))
1674 return -EINVAL;
1675
1676 ce = (struct ebt_entry __user *)*dstptr;
1677 if (copy_to_user(ce, e, sizeof(*ce)))
1678 return -EFAULT;
1679
1680 origsize = *size;
1681 *dstptr += sizeof(*ce);
1682
1683 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1684 if (ret)
1685 return ret;
1686 watchers_offset = e->watchers_offset - (origsize - *size);
1687
1688 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1689 if (ret)
1690 return ret;
1691 target_offset = e->target_offset - (origsize - *size);
1692
1693 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1694
1695 ret = compat_target_to_user(t, dstptr, size);
1696 if (ret)
1697 return ret;
1698 next_offset = e->next_offset - (origsize - *size);
1699
1700 if (put_user(watchers_offset, &ce->watchers_offset) ||
1701 put_user(target_offset, &ce->target_offset) ||
1702 put_user(next_offset, &ce->next_offset))
1703 return -EFAULT;
1704
1705 *size -= sizeof(*ce);
1706 return 0;
1707}
1708
1709static int compat_calc_match(struct ebt_entry_match *m, int *off)
1710{
1711 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1712 *off += ebt_compat_entry_padsize();
1713 return 0;
1714}
1715
1716static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1717{
1718 *off += xt_compat_target_offset(w->u.watcher);
1719 *off += ebt_compat_entry_padsize();
1720 return 0;
1721}
1722
1723static int compat_calc_entry(const struct ebt_entry *e,
1724 const struct ebt_table_info *info,
1725 const void *base,
1726 struct compat_ebt_replace *newinfo)
1727{
1728 const struct ebt_entry_target *t;
1729 unsigned int entry_offset;
1730 int off, ret, i;
1731
1732 if (e->bitmask == 0)
1733 return 0;
1734
1735 off = 0;
1736 entry_offset = (void *)e - base;
1737
1738 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1739 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1740
1741 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1742
1743 off += xt_compat_target_offset(t->u.target);
1744 off += ebt_compat_entry_padsize();
1745
1746 newinfo->entries_size -= off;
1747
1748 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1749 if (ret)
1750 return ret;
1751
1752 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1753 const void *hookptr = info->hook_entry[i];
1754 if (info->hook_entry[i] &&
1755 (e < (struct ebt_entry *)(base - hookptr))) {
1756 newinfo->hook_entry[i] -= off;
1757 pr_debug("0x%08X -> 0x%08X\n",
1758 newinfo->hook_entry[i] + off,
1759 newinfo->hook_entry[i]);
1760 }
1761 }
1762
1763 return 0;
1764}
1765
1766
1767static int compat_table_info(const struct ebt_table_info *info,
1768 struct compat_ebt_replace *newinfo)
1769{
1770 unsigned int size = info->entries_size;
1771 const void *entries = info->entries;
1772
1773 newinfo->entries_size = size;
1774
1775 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1776 entries, newinfo);
1777}
1778
1779static int compat_copy_everything_to_user(struct ebt_table *t,
1780 void __user *user, int *len, int cmd)
1781{
1782 struct compat_ebt_replace repl, tmp;
1783 struct ebt_counter *oldcounters;
1784 struct ebt_table_info tinfo;
1785 int ret;
1786 void __user *pos;
1787
1788 memset(&tinfo, 0, sizeof(tinfo));
1789
1790 if (cmd == EBT_SO_GET_ENTRIES) {
1791 tinfo.entries_size = t->private->entries_size;
1792 tinfo.nentries = t->private->nentries;
1793 tinfo.entries = t->private->entries;
1794 oldcounters = t->private->counters;
1795 } else {
1796 tinfo.entries_size = t->table->entries_size;
1797 tinfo.nentries = t->table->nentries;
1798 tinfo.entries = t->table->entries;
1799 oldcounters = t->table->counters;
1800 }
1801
1802 if (copy_from_user(&tmp, user, sizeof(tmp)))
1803 return -EFAULT;
1804
1805 if (tmp.nentries != tinfo.nentries ||
1806 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1807 return -EINVAL;
1808
1809 memcpy(&repl, &tmp, sizeof(repl));
1810 if (cmd == EBT_SO_GET_ENTRIES)
1811 ret = compat_table_info(t->private, &repl);
1812 else
1813 ret = compat_table_info(&tinfo, &repl);
1814 if (ret)
1815 return ret;
1816
1817 if (*len != sizeof(tmp) + repl.entries_size +
1818 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1819 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1820 *len, tinfo.entries_size, repl.entries_size);
1821 return -EINVAL;
1822 }
1823
1824 /* userspace might not need the counters */
1825 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1826 tmp.num_counters, tinfo.nentries);
1827 if (ret)
1828 return ret;
1829
1830 pos = compat_ptr(tmp.entries);
1831 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1832 compat_copy_entry_to_user, &pos, &tmp.entries_size);
1833}
1834
1835struct ebt_entries_buf_state {
1836 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1837 u32 buf_kern_len; /* total size of kernel buffer */
1838 u32 buf_kern_offset; /* amount of data copied so far */
1839 u32 buf_user_offset; /* read position in userspace buffer */
1840};
1841
1842static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1843{
1844 state->buf_kern_offset += sz;
1845 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1846}
1847
1848static int ebt_buf_add(struct ebt_entries_buf_state *state,
1849 void *data, unsigned int sz)
1850{
1851 if (state->buf_kern_start == NULL)
1852 goto count_only;
1853
1854 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1855
1856 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1857
1858 count_only:
1859 state->buf_user_offset += sz;
1860 return ebt_buf_count(state, sz);
1861}
1862
1863static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1864{
1865 char *b = state->buf_kern_start;
1866
1867 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1868
1869 if (b != NULL && sz > 0)
1870 memset(b + state->buf_kern_offset, 0, sz);
1871 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1872 return ebt_buf_count(state, sz);
1873}
1874
1875enum compat_mwt {
1876 EBT_COMPAT_MATCH,
1877 EBT_COMPAT_WATCHER,
1878 EBT_COMPAT_TARGET,
1879};
1880
1881static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1882 enum compat_mwt compat_mwt,
1883 struct ebt_entries_buf_state *state,
1884 const unsigned char *base)
1885{
1886 char name[EBT_FUNCTION_MAXNAMELEN];
1887 struct xt_match *match;
1888 struct xt_target *wt;
1889 void *dst = NULL;
1890 int off, pad = 0, ret = 0;
1891 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1892
1893 strlcpy(name, mwt->u.name, sizeof(name));
1894
1895 if (state->buf_kern_start)
1896 dst = state->buf_kern_start + state->buf_kern_offset;
1897
1898 entry_offset = (unsigned char *) mwt - base;
1899 switch (compat_mwt) {
1900 case EBT_COMPAT_MATCH:
1901 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1902 name, 0), "ebt_%s", name);
1903 if (match == NULL)
1904 return -ENOENT;
1905 if (IS_ERR(match))
1906 return PTR_ERR(match);
1907
1908 off = ebt_compat_match_offset(match, match_size);
1909 if (dst) {
1910 if (match->compat_from_user)
1911 match->compat_from_user(dst, mwt->data);
1912 else
1913 memcpy(dst, mwt->data, match_size);
1914 }
1915
1916 size_kern = match->matchsize;
1917 if (unlikely(size_kern == -1))
1918 size_kern = match_size;
1919 module_put(match->me);
1920 break;
1921 case EBT_COMPAT_WATCHER: /* fallthrough */
1922 case EBT_COMPAT_TARGET:
1923 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1924 name, 0), "ebt_%s", name);
1925 if (wt == NULL)
1926 return -ENOENT;
1927 if (IS_ERR(wt))
1928 return PTR_ERR(wt);
1929 off = xt_compat_target_offset(wt);
1930
1931 if (dst) {
1932 if (wt->compat_from_user)
1933 wt->compat_from_user(dst, mwt->data);
1934 else
1935 memcpy(dst, mwt->data, match_size);
1936 }
1937
1938 size_kern = wt->targetsize;
1939 module_put(wt->me);
1940 break;
1941 }
1942
1943 if (!dst) {
1944 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1945 off + ebt_compat_entry_padsize());
1946 if (ret < 0)
1947 return ret;
1948 }
1949
1950 state->buf_kern_offset += match_size + off;
1951 state->buf_user_offset += match_size;
1952 pad = XT_ALIGN(size_kern) - size_kern;
1953
1954 if (pad > 0 && dst) {
1955 BUG_ON(state->buf_kern_len <= pad);
1956 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1957 memset(dst + size_kern, 0, pad);
1958 }
1959 return off + match_size;
1960}
1961
1962/*
1963 * return size of all matches, watchers or target, including necessary
1964 * alignment and padding.
1965 */
1966static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1967 unsigned int size_left, enum compat_mwt type,
1968 struct ebt_entries_buf_state *state, const void *base)
1969{
1970 int growth = 0;
1971 char *buf;
1972
1973 if (size_left == 0)
1974 return 0;
1975
1976 buf = (char *) match32;
1977
1978 while (size_left >= sizeof(*match32)) {
1979 struct ebt_entry_match *match_kern;
1980 int ret;
1981
1982 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1983 if (match_kern) {
1984 char *tmp;
1985 tmp = state->buf_kern_start + state->buf_kern_offset;
1986 match_kern = (struct ebt_entry_match *) tmp;
1987 }
1988 ret = ebt_buf_add(state, buf, sizeof(*match32));
1989 if (ret < 0)
1990 return ret;
1991 size_left -= sizeof(*match32);
1992
1993 /* add padding before match->data (if any) */
1994 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1995 if (ret < 0)
1996 return ret;
1997
1998 if (match32->match_size > size_left)
1999 return -EINVAL;
2000
2001 size_left -= match32->match_size;
2002
2003 ret = compat_mtw_from_user(match32, type, state, base);
2004 if (ret < 0)
2005 return ret;
2006
2007 BUG_ON(ret < match32->match_size);
2008 growth += ret - match32->match_size;
2009 growth += ebt_compat_entry_padsize();
2010
2011 buf += sizeof(*match32);
2012 buf += match32->match_size;
2013
2014 if (match_kern)
2015 match_kern->match_size = ret;
2016
2017 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2018 match32 = (struct compat_ebt_entry_mwt *) buf;
2019 }
2020
2021 return growth;
2022}
2023
2024#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2025({ \
2026 unsigned int __i; \
2027 int __ret = 0; \
2028 struct compat_ebt_entry_mwt *__watcher; \
2029 \
2030 for (__i = e->watchers_offset; \
2031 __i < (e)->target_offset; \
2032 __i += __watcher->watcher_size + \
2033 sizeof(struct compat_ebt_entry_mwt)) { \
2034 __watcher = (void *)(e) + __i; \
2035 __ret = fn(__watcher , ## args); \
2036 if (__ret != 0) \
2037 break; \
2038 } \
2039 if (__ret == 0) { \
2040 if (__i != (e)->target_offset) \
2041 __ret = -EINVAL; \
2042 } \
2043 __ret; \
2044})
2045
2046#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2047({ \
2048 unsigned int __i; \
2049 int __ret = 0; \
2050 struct compat_ebt_entry_mwt *__match; \
2051 \
2052 for (__i = sizeof(struct ebt_entry); \
2053 __i < (e)->watchers_offset; \
2054 __i += __match->match_size + \
2055 sizeof(struct compat_ebt_entry_mwt)) { \
2056 __match = (void *)(e) + __i; \
2057 __ret = fn(__match , ## args); \
2058 if (__ret != 0) \
2059 break; \
2060 } \
2061 if (__ret == 0) { \
2062 if (__i != (e)->watchers_offset) \
2063 __ret = -EINVAL; \
2064 } \
2065 __ret; \
2066})
2067
2068/* called for all ebt_entry structures. */
2069static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2070 unsigned int *total,
2071 struct ebt_entries_buf_state *state)
2072{
2073 unsigned int i, j, startoff, new_offset = 0;
2074 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2075 unsigned int offsets[4];
2076 unsigned int *offsets_update = NULL;
2077 int ret;
2078 char *buf_start;
2079
2080 if (*total < sizeof(struct ebt_entries))
2081 return -EINVAL;
2082
2083 if (!entry->bitmask) {
2084 *total -= sizeof(struct ebt_entries);
2085 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2086 }
2087 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2088 return -EINVAL;
2089
2090 startoff = state->buf_user_offset;
2091 /* pull in most part of ebt_entry, it does not need to be changed. */
2092 ret = ebt_buf_add(state, entry,
2093 offsetof(struct ebt_entry, watchers_offset));
2094 if (ret < 0)
2095 return ret;
2096
2097 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2098 memcpy(&offsets[1], &entry->watchers_offset,
2099 sizeof(offsets) - sizeof(offsets[0]));
2100
2101 if (state->buf_kern_start) {
2102 buf_start = state->buf_kern_start + state->buf_kern_offset;
2103 offsets_update = (unsigned int *) buf_start;
2104 }
2105 ret = ebt_buf_add(state, &offsets[1],
2106 sizeof(offsets) - sizeof(offsets[0]));
2107 if (ret < 0)
2108 return ret;
2109 buf_start = (char *) entry;
2110 /*
2111 * 0: matches offset, always follows ebt_entry.
2112 * 1: watchers offset, from ebt_entry structure
2113 * 2: target offset, from ebt_entry structure
2114 * 3: next ebt_entry offset, from ebt_entry structure
2115 *
2116 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2117 */
2118 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2119 struct compat_ebt_entry_mwt *match32;
2120 unsigned int size;
2121 char *buf = buf_start;
2122
2123 buf = buf_start + offsets[i];
2124 if (offsets[i] > offsets[j])
2125 return -EINVAL;
2126
2127 match32 = (struct compat_ebt_entry_mwt *) buf;
2128 size = offsets[j] - offsets[i];
2129 ret = ebt_size_mwt(match32, size, i, state, base);
2130 if (ret < 0)
2131 return ret;
2132 new_offset += ret;
2133 if (offsets_update && new_offset) {
2134 pr_debug("ebtables: change offset %d to %d\n",
2135 offsets_update[i], offsets[j] + new_offset);
2136 offsets_update[i] = offsets[j] + new_offset;
2137 }
2138 }
2139
2140 startoff = state->buf_user_offset - startoff;
2141
2142 BUG_ON(*total < startoff);
2143 *total -= startoff;
2144 return 0;
2145}
2146
2147/*
2148 * repl->entries_size is the size of the ebt_entry blob in userspace.
2149 * It might need more memory when copied to a 64 bit kernel in case
2150 * userspace is 32-bit. So, first task: find out how much memory is needed.
2151 *
2152 * Called before validation is performed.
2153 */
2154static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2155 struct ebt_entries_buf_state *state)
2156{
2157 unsigned int size_remaining = size_user;
2158 int ret;
2159
2160 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2161 &size_remaining, state);
2162 if (ret < 0)
2163 return ret;
2164
2165 WARN_ON(size_remaining);
2166 return state->buf_kern_offset;
2167}
2168
2169
2170static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2171 void __user *user, unsigned int len)
2172{
2173 struct compat_ebt_replace tmp;
2174 int i;
2175
2176 if (len < sizeof(tmp))
2177 return -EINVAL;
2178
2179 if (copy_from_user(&tmp, user, sizeof(tmp)))
2180 return -EFAULT;
2181
2182 if (len != sizeof(tmp) + tmp.entries_size)
2183 return -EINVAL;
2184
2185 if (tmp.entries_size == 0)
2186 return -EINVAL;
2187
2188 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2189 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2190 return -ENOMEM;
2191 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2192 return -ENOMEM;
2193
2194 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2195
2196 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2197 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2198 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2199
2200 repl->num_counters = tmp.num_counters;
2201 repl->counters = compat_ptr(tmp.counters);
2202 repl->entries = compat_ptr(tmp.entries);
2203 return 0;
2204}
2205
2206static int compat_do_replace(struct net *net, void __user *user,
2207 unsigned int len)
2208{
2209 int ret, i, countersize, size64;
2210 struct ebt_table_info *newinfo;
2211 struct ebt_replace tmp;
2212 struct ebt_entries_buf_state state;
2213 void *entries_tmp;
2214
2215 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2216 if (ret)
2217 return ret;
2218
2219 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2220 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2221 if (!newinfo)
2222 return -ENOMEM;
2223
2224 if (countersize)
2225 memset(newinfo->counters, 0, countersize);
2226
2227 memset(&state, 0, sizeof(state));
2228
2229 newinfo->entries = vmalloc(tmp.entries_size);
2230 if (!newinfo->entries) {
2231 ret = -ENOMEM;
2232 goto free_newinfo;
2233 }
2234 if (copy_from_user(
2235 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2236 ret = -EFAULT;
2237 goto free_entries;
2238 }
2239
2240 entries_tmp = newinfo->entries;
2241
2242 xt_compat_lock(NFPROTO_BRIDGE);
2243
2244 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2245 if (ret < 0)
2246 goto out_unlock;
2247
2248 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2249 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2250 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2251
2252 size64 = ret;
2253 newinfo->entries = vmalloc(size64);
2254 if (!newinfo->entries) {
2255 vfree(entries_tmp);
2256 ret = -ENOMEM;
2257 goto out_unlock;
2258 }
2259
2260 memset(&state, 0, sizeof(state));
2261 state.buf_kern_start = newinfo->entries;
2262 state.buf_kern_len = size64;
2263
2264 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2265 BUG_ON(ret < 0); /* parses same data again */
2266
2267 vfree(entries_tmp);
2268 tmp.entries_size = size64;
2269
2270 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2271 char __user *usrptr;
2272 if (tmp.hook_entry[i]) {
2273 unsigned int delta;
2274 usrptr = (char __user *) tmp.hook_entry[i];
2275 delta = usrptr - tmp.entries;
2276 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2277 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2278 }
2279 }
2280
2281 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2282 xt_compat_unlock(NFPROTO_BRIDGE);
2283
2284 ret = do_replace_finish(net, &tmp, newinfo);
2285 if (ret == 0)
2286 return ret;
2287free_entries:
2288 vfree(newinfo->entries);
2289free_newinfo:
2290 vfree(newinfo);
2291 return ret;
2292out_unlock:
2293 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2294 xt_compat_unlock(NFPROTO_BRIDGE);
2295 goto free_entries;
2296}
2297
2298static int compat_update_counters(struct net *net, void __user *user,
2299 unsigned int len)
2300{
2301 struct compat_ebt_replace hlp;
2302
2303 if (copy_from_user(&hlp, user, sizeof(hlp)))
2304 return -EFAULT;
2305
2306 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2307 return -EINVAL;
2308
2309 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2310 hlp.num_counters, user, len);
2311}
2312
2313static int compat_do_ebt_set_ctl(struct sock *sk,
2314 int cmd, void __user *user, unsigned int len)
2315{
2316 int ret;
2317
2318 if (!capable(CAP_NET_ADMIN))
2319 return -EPERM;
2320
2321 switch (cmd) {
2322 case EBT_SO_SET_ENTRIES:
2323 ret = compat_do_replace(sock_net(sk), user, len);
2324 break;
2325 case EBT_SO_SET_COUNTERS:
2326 ret = compat_update_counters(sock_net(sk), user, len);
2327 break;
2328 default:
2329 ret = -EINVAL;
2330 }
2331 return ret;
2332}
2333
2334static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2335 void __user *user, int *len)
2336{
2337 int ret;
2338 struct compat_ebt_replace tmp;
2339 struct ebt_table *t;
2340
2341 if (!capable(CAP_NET_ADMIN))
2342 return -EPERM;
2343
2344 if ((cmd == EBT_SO_GET_INFO ||
2345 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2346 return -EINVAL;
2347
2348 if (copy_from_user(&tmp, user, sizeof(tmp)))
2349 return -EFAULT;
2350
2351 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2352 if (!t)
2353 return ret;
2354
2355 xt_compat_lock(NFPROTO_BRIDGE);
2356 switch (cmd) {
2357 case EBT_SO_GET_INFO:
2358 tmp.nentries = t->private->nentries;
2359 ret = compat_table_info(t->private, &tmp);
2360 if (ret)
2361 goto out;
2362 tmp.valid_hooks = t->valid_hooks;
2363
2364 if (copy_to_user(user, &tmp, *len) != 0) {
2365 ret = -EFAULT;
2366 break;
2367 }
2368 ret = 0;
2369 break;
2370 case EBT_SO_GET_INIT_INFO:
2371 tmp.nentries = t->table->nentries;
2372 tmp.entries_size = t->table->entries_size;
2373 tmp.valid_hooks = t->table->valid_hooks;
2374
2375 if (copy_to_user(user, &tmp, *len) != 0) {
2376 ret = -EFAULT;
2377 break;
2378 }
2379 ret = 0;
2380 break;
2381 case EBT_SO_GET_ENTRIES:
2382 case EBT_SO_GET_INIT_ENTRIES:
2383 ret = compat_copy_everything_to_user(t, user, len, cmd);
2384 break;
2385 default:
2386 ret = -EINVAL;
2387 }
2388 out:
2389 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2390 xt_compat_unlock(NFPROTO_BRIDGE);
2391 mutex_unlock(&ebt_mutex);
2392 return ret;
2393}
2394#endif
2395
2396static struct nf_sockopt_ops ebt_sockopts =
2397{
2398 .pf = PF_INET,
2399 .set_optmin = EBT_BASE_CTL,
2400 .set_optmax = EBT_SO_SET_MAX + 1,
2401 .set = do_ebt_set_ctl,
2402#ifdef CONFIG_COMPAT
2403 .compat_set = compat_do_ebt_set_ctl,
2404#endif
2405 .get_optmin = EBT_BASE_CTL,
2406 .get_optmax = EBT_SO_GET_MAX + 1,
2407 .get = do_ebt_get_ctl,
2408#ifdef CONFIG_COMPAT
2409 .compat_get = compat_do_ebt_get_ctl,
2410#endif
2411 .owner = THIS_MODULE,
2412};
2413