author    Steffen Klassert <steffen.klassert@secunet.com>    2013-02-05 06:52:55 -0500
committer Steffen Klassert <steffen.klassert@secunet.com>    2013-02-06 02:31:10 -0500
commit    a0073fe18e718a1c815fe8b0120f1ac3c60284ba
tree      1f30d5f9415a90c5662376fd3e4c0420a431b9f1 /net/xfrm/xfrm_policy.c
parent    fa8599db8f222fd9d351a640074377a841979187
xfrm: Add a state resolution packet queue
By default, we blackhole packets until the key manager resolves the states. This patch implements a packet queue where IPsec packets are queued until the states are resolved: we generate a dummy xfrm bundle, and the output routine of the returned route enqueues the packet to a per-policy queue and arms a timer that checks for state resolution when dst_output() is called. Once the states are resolved, the packets are sent out of the queue. If the states are not resolved after some time, the queue is flushed.

This patch keeps the default behaviour of blackholing packets as long as we have no states. To enable the packet queue, the sysctl xfrm_larval_drop must be switched off.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
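As a minimal usage sketch (assuming the knob is exposed as net.core.xfrm_larval_drop, its usual sysctl path), the queueing behaviour introduced here can be switched on like this:

    # queue IPsec packets while states are unresolved instead of blackholing them
    sysctl -w net.core.xfrm_larval_drop=0

    # equivalently, via procfs
    echo 0 > /proc/sys/net/core/xfrm_larval_drop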
Diffstat (limited to 'net/xfrm/xfrm_policy.c')
-rw-r--r--    net/xfrm/xfrm_policy.c | 229
1 file changed, 225 insertions(+), 4 deletions(-)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 41eabc46f110..456b11b0f049 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -35,6 +35,10 @@
 
 #include "xfrm_hash.h"
 
+#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
+#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
+#define XFRM_MAX_QUEUE_LEN 100
+
 DEFINE_MUTEX(xfrm_cfg_mutex);
 EXPORT_SYMBOL(xfrm_cfg_mutex);
 
@@ -51,7 +55,7 @@ static struct kmem_cache *xfrm_dst_cache __read_mostly;
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
-
+static void xfrm_policy_queue_process(unsigned long arg);
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 						int dir);
@@ -287,8 +291,11 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 		INIT_HLIST_NODE(&policy->byidx);
 		rwlock_init(&policy->lock);
 		atomic_set(&policy->refcnt, 1);
+		skb_queue_head_init(&policy->polq.hold_queue);
 		setup_timer(&policy->timer, xfrm_policy_timer,
 				(unsigned long)policy);
+		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
+			    (unsigned long)policy);
 		policy->flo.ops = &xfrm_policy_fc_ops;
 	}
 	return policy;
@@ -309,6 +316,16 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
 
+static void xfrm_queue_purge(struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		dev_put(skb->dev);
+		kfree_skb(skb);
+	}
+}
+
 /* Rule must be locked. Release descentant resources, announce
  * entry dead. The rule must be unlinked from lists to the moment.
  */
@@ -319,6 +336,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 
 	atomic_inc(&policy->genid);
 
+	del_timer(&policy->polq.hold_timer);
+	xfrm_queue_purge(&policy->polq.hold_queue);
+
 	if (del_timer(&policy->timer))
 		xfrm_pol_put(policy);
 
@@ -562,6 +582,31 @@ static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
 	return 0;
 }
 
+static void xfrm_policy_requeue(struct xfrm_policy *old,
+				struct xfrm_policy *new)
+{
+	struct xfrm_policy_queue *pq = &old->polq;
+	struct sk_buff_head list;
+
+	__skb_queue_head_init(&list);
+
+	spin_lock_bh(&pq->hold_queue.lock);
+	skb_queue_splice_init(&pq->hold_queue, &list);
+	del_timer(&pq->hold_timer);
+	spin_unlock_bh(&pq->hold_queue.lock);
+
+	if (skb_queue_empty(&list))
+		return;
+
+	pq = &new->polq;
+
+	spin_lock_bh(&pq->hold_queue.lock);
+	skb_queue_splice(&list, &pq->hold_queue);
+	pq->timeout = XFRM_QUEUE_TMO_MIN;
+	mod_timer(&pq->hold_timer, jiffies);
+	spin_unlock_bh(&pq->hold_queue.lock);
+}
+
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 {
 	struct net *net = xp_net(policy);
@@ -603,8 +648,10 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 	net->xfrm.policy_count[dir]++;
 	atomic_inc(&flow_cache_genid);
 	rt_genid_bump(net);
-	if (delpol)
+	if (delpol) {
+		xfrm_policy_requeue(delpol, policy);
 		__xfrm_policy_unlink(delpol, dir);
+	}
 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
 	policy->curlft.add_time = get_seconds();
@@ -1115,11 +1162,15 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
 		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
 	}
-	if (old_pol)
+	if (old_pol) {
+		if (pol)
+			xfrm_policy_requeue(old_pol, pol);
+
 		/* Unlinking succeeds always. This is the only function
 		 * allowed to delete or replace socket policy.
 		 */
 		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
+	}
 	write_unlock_bh(&xfrm_policy_lock);
 
 	if (old_pol) {
@@ -1310,6 +1361,8 @@ static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
 		 * It means we need to try again resolving. */
 		if (xdst->num_xfrms > 0)
 			return NULL;
+	} else if (dst->flags & DST_XFRM_QUEUE) {
+		return NULL;
 	} else {
 		/* Real bundle */
 		if (stale_bundle(dst))
@@ -1673,6 +1726,171 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 	return xdst;
 }
 
+static void xfrm_policy_queue_process(unsigned long arg)
+{
+	int err = 0;
+	struct sk_buff *skb;
+	struct sock *sk;
+	struct dst_entry *dst;
+	struct net_device *dev;
+	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
+	struct xfrm_policy_queue *pq = &pol->polq;
+	struct flowi fl;
+	struct sk_buff_head list;
+
+	spin_lock(&pq->hold_queue.lock);
+	skb = skb_peek(&pq->hold_queue);
+	dst = skb_dst(skb);
+	sk = skb->sk;
+	xfrm_decode_session(skb, &fl, dst->ops->family);
+	spin_unlock(&pq->hold_queue.lock);
+
+	dst_hold(dst->path);
+	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
+			  sk, 0);
+	if (IS_ERR(dst))
+		goto purge_queue;
+
+	if (dst->flags & DST_XFRM_QUEUE) {
+		dst_release(dst);
+
+		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
+			goto purge_queue;
+
+		pq->timeout = pq->timeout << 1;
+		mod_timer(&pq->hold_timer, jiffies + pq->timeout);
+		return;
+	}
+
+	dst_release(dst);
+
+	__skb_queue_head_init(&list);
+
+	spin_lock(&pq->hold_queue.lock);
+	pq->timeout = 0;
+	skb_queue_splice_init(&pq->hold_queue, &list);
+	spin_unlock(&pq->hold_queue.lock);
+
+	while (!skb_queue_empty(&list)) {
+		skb = __skb_dequeue(&list);
+
+		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
+		dst_hold(skb_dst(skb)->path);
+		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
+				  &fl, skb->sk, 0);
+		if (IS_ERR(dst)) {
+			dev_put(skb->dev);
+			kfree_skb(skb);
+			continue;
+		}
+
+		nf_reset(skb);
+		skb_dst_drop(skb);
+		skb_dst_set(skb, dst);
+
+		dev = skb->dev;
+		err = dst_output(skb);
+		dev_put(dev);
+	}
+
+	return;
+
+purge_queue:
+	pq->timeout = 0;
+	xfrm_queue_purge(&pq->hold_queue);
+}
+
+static int xdst_queue_output(struct sk_buff *skb)
+{
+	unsigned long sched_next;
+	struct dst_entry *dst = skb_dst(skb);
+	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+	struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
+
+	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
+		kfree_skb(skb);
+		return -EAGAIN;
+	}
+
+	skb_dst_force(skb);
+	dev_hold(skb->dev);
+
+	spin_lock_bh(&pq->hold_queue.lock);
+
+	if (!pq->timeout)
+		pq->timeout = XFRM_QUEUE_TMO_MIN;
+
+	sched_next = jiffies + pq->timeout;
+
+	if (del_timer(&pq->hold_timer)) {
+		if (time_before(pq->hold_timer.expires, sched_next))
+			sched_next = pq->hold_timer.expires;
+	}
+
+	__skb_queue_tail(&pq->hold_queue, skb);
+	mod_timer(&pq->hold_timer, sched_next);
+
+	spin_unlock_bh(&pq->hold_queue.lock);
+
+	return 0;
+}
+
+static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+						 struct dst_entry *dst,
+						 const struct flowi *fl,
+						 int num_xfrms,
+						 u16 family)
+{
+	int err;
+	struct net_device *dev;
+	struct dst_entry *dst1;
+	struct xfrm_dst *xdst;
+
+	xdst = xfrm_alloc_dst(net, family);
+	if (IS_ERR(xdst))
+		return xdst;
+
+	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
+	    (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
+		return xdst;
+
+	dst1 = &xdst->u.dst;
+	dst_hold(dst);
+	xdst->route = dst;
+
+	dst_copy_metrics(dst1, dst);
+
+	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
+	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
+	dst1->lastuse = jiffies;
+
+	dst1->input = dst_discard;
+	dst1->output = xdst_queue_output;
+
+	dst_hold(dst);
+	dst1->child = dst;
+	dst1->path = dst;
+
+	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
+
+	err = -ENODEV;
+	dev = dst->dev;
+	if (!dev)
+		goto free_dst;
+
+	err = xfrm_fill_dst(xdst, dev, fl);
+	if (err)
+		goto free_dst;
+
+out:
+	return xdst;
+
+free_dst:
+	dst_release(dst1);
+	xdst = ERR_PTR(err);
+	goto out;
+}
+
 static struct flow_cache_object *
 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
 		   struct flow_cache_object *oldflo, void *ctx)
@@ -1751,7 +1969,7 @@ make_dummy_bundle:
 	/* We found policies, but there's no bundles to instantiate:
 	 * either because the policy blocks, has no transformations or
 	 * we could not build template (no xfrm_states).*/
-	xdst = xfrm_alloc_dst(net, family);
+	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
 	if (IS_ERR(xdst)) {
 		xfrm_pols_put(pols, num_pols);
 		return ERR_CAST(xdst);
@@ -2359,6 +2577,9 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 	    (dst->dev && !netif_running(dst->dev)))
 		return 0;
 
+	if (dst->flags & DST_XFRM_QUEUE)
+		return 1;
+
 	last = NULL;
 
 	do {