Diffstat (limited to 'net/decnet/dn_route.c')
-rw-r--r--	net/decnet/dn_route.c	423
1 files changed, 231 insertions, 192 deletions
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 6585ea6d1182..74544bc6fdec 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -93,7 +93,7 @@
 
 struct dn_rt_hash_bucket
 {
-	struct dn_route *chain;
+	struct dn_route __rcu *chain;
 	spinlock_t lock;
 };
 
@@ -110,6 +110,9 @@ static unsigned long dn_rt_deadline;
 
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
+static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
+static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -129,12 +132,20 @@ static struct dst_ops dn_dst_ops = {
 	.gc_thresh = 128,
 	.gc = dn_dst_gc,
 	.check = dn_dst_check,
+	.default_advmss = dn_dst_default_advmss,
+	.default_mtu = dn_dst_default_mtu,
+	.cow_metrics = dst_cow_metrics_generic,
+	.destroy = dn_dst_destroy,
 	.negative_advice = dn_dst_negative_advice,
 	.link_failure = dn_dst_link_failure,
 	.update_pmtu = dn_dst_update_pmtu,
-	.entries = ATOMIC_INIT(0),
 };
 
+static void dn_dst_destroy(struct dst_entry *dst)
+{
+	dst_destroy_metrics_generic(dst);
+}
+
 static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
 {
 	__u16 tmp = (__u16 __force)(src ^ dst);
@@ -158,15 +169,17 @@ static inline void dnrt_drop(struct dn_route *rt)
 static void dn_dst_check_expire(unsigned long dummy)
 {
 	int i;
-	struct dn_route *rt, **rtp;
+	struct dn_route *rt;
+	struct dn_route __rcu **rtp;
 	unsigned long now = jiffies;
 	unsigned long expire = 120 * HZ;
 
-	for(i = 0; i <= dn_rt_hash_mask; i++) {
+	for (i = 0; i <= dn_rt_hash_mask; i++) {
 		rtp = &dn_rt_hash_table[i].chain;
 
 		spin_lock(&dn_rt_hash_table[i].lock);
-		while((rt=*rtp) != NULL) {
+		while ((rt = rcu_dereference_protected(*rtp,
+				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) ||
 			    (now - rt->dst.lastuse) < expire) {
 				rtp = &rt->dst.dn_next;
@@ -187,17 +200,19 @@ static void dn_dst_check_expire(unsigned long dummy)
 
 static int dn_dst_gc(struct dst_ops *ops)
 {
-	struct dn_route *rt, **rtp;
+	struct dn_route *rt;
+	struct dn_route __rcu **rtp;
 	int i;
 	unsigned long now = jiffies;
 	unsigned long expire = 10 * HZ;
 
-	for(i = 0; i <= dn_rt_hash_mask; i++) {
+	for (i = 0; i <= dn_rt_hash_mask; i++) {
 
 		spin_lock_bh(&dn_rt_hash_table[i].lock);
 		rtp = &dn_rt_hash_table[i].chain;
 
-		while((rt=*rtp) != NULL) {
+		while ((rt = rcu_dereference_protected(*rtp,
+				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) ||
 			    (now - rt->dst.lastuse) < expire) {
 				rtp = &rt->dst.dn_next;
@@ -228,7 +243,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 	u32 min_mtu = 230;
 	struct dn_dev *dn = dst->neighbour ?
-		(struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;
+		rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
 
 	if (dn && dn->use_long == 0)
 		min_mtu -= 6;
@@ -237,13 +252,14 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 
 	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
 		if (!(dst_metric_locked(dst, RTAX_MTU))) {
-			dst->metrics[RTAX_MTU-1] = mtu;
+			dst_metric_set(dst, RTAX_MTU, mtu);
 			dst_set_expires(dst, dn_rt_mtu_expires);
 		}
 		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
 			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
-			if (dst_metric(dst, RTAX_ADVMSS) > mss)
-				dst->metrics[RTAX_ADVMSS-1] = mss;
+			u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
+			if (!existing_mss || existing_mss > mss)
+				dst_metric_set(dst, RTAX_ADVMSS, mss);
 		}
 	}
 }
@@ -266,26 +282,28 @@ static void dn_dst_link_failure(struct sk_buff *skb)
 {
 }
 
-static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
+static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
 {
-	return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
-		(fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
-		(fl1->mark ^ fl2->mark) |
-		(fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
-		(fl1->oif ^ fl2->oif) |
-		(fl1->iif ^ fl2->iif)) == 0;
+	return ((fl1->daddr ^ fl2->daddr) |
+		(fl1->saddr ^ fl2->saddr) |
+		(fl1->flowidn_mark ^ fl2->flowidn_mark) |
+		(fl1->flowidn_scope ^ fl2->flowidn_scope) |
+		(fl1->flowidn_oif ^ fl2->flowidn_oif) |
+		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
 }
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
 {
-	struct dn_route *rth, **rthp;
+	struct dn_route *rth;
+	struct dn_route __rcu **rthp;
 	unsigned long now = jiffies;
 
 	rthp = &dn_rt_hash_table[hash].chain;
 
 	spin_lock_bh(&dn_rt_hash_table[hash].lock);
-	while((rth = *rthp) != NULL) {
-		if (compare_keys(&rth->fl, &rt->fl)) {
+	while ((rth = rcu_dereference_protected(*rthp,
+			lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
+		if (compare_keys(&rth->fld, &rt->fld)) {
 			/* Put it first */
 			*rthp = rth->dst.dn_next;
 			rcu_assign_pointer(rth->dst.dn_next,
@@ -316,15 +334,15 @@ static void dn_run_flush(unsigned long dummy)
 	int i;
 	struct dn_route *rt, *next;
 
-	for(i = 0; i < dn_rt_hash_mask; i++) {
+	for (i = 0; i < dn_rt_hash_mask; i++) {
 		spin_lock_bh(&dn_rt_hash_table[i].lock);
 
-		if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
+		if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
 			goto nothing_to_declare;
 
-		for(; rt; rt=next) {
-			next = rt->dst.dn_next;
-			rt->dst.dn_next = NULL;
+		for(; rt; rt = next) {
+			next = rcu_dereference_raw(rt->dst.dn_next);
+			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
 			dst_free((struct dst_entry *)rt);
 		}
 
@@ -459,15 +477,16 @@ static int dn_return_long(struct sk_buff *skb)
  */
 static int dn_route_rx_packet(struct sk_buff *skb)
 {
-	struct dn_skb_cb *cb = DN_SKB_CB(skb);
+	struct dn_skb_cb *cb;
 	int err;
 
 	if ((err = dn_route_input(skb)) == 0)
 		return dst_input(skb);
 
+	cb = DN_SKB_CB(skb);
 	if (decnet_debug_level & 4) {
 		char *devname = skb->dev ? skb->dev->name : "???";
-		struct dn_skb_cb *cb = DN_SKB_CB(skb);
+
 		printk(KERN_DEBUG
 		       "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
 		       (int)cb->rt_flags, devname, skb->len,
@@ -574,7 +593,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 	struct dn_skb_cb *cb;
 	unsigned char flags = 0;
 	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
-	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
+	struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
 	unsigned char padlen = 0;
 
 	if (!net_eq(dev_net(dev), &init_net))
@@ -729,7 +748,7 @@ static int dn_forward(struct sk_buff *skb)
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 	struct dst_entry *dst = skb_dst(skb);
-	struct dn_dev *dn_db = dst->dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
 	struct dn_route *rt;
 	struct neighbour *neigh = dst->neighbour;
 	int header_len;
@@ -789,19 +808,28 @@ static int dn_rt_bug(struct sk_buff *skb)
 	return NET_RX_DROP;
 }
 
+static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
+{
+	return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
+}
+
+static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 {
 	struct dn_fib_info *fi = res->fi;
 	struct net_device *dev = rt->dst.dev;
+	unsigned int mss_metric;
 	struct neighbour *n;
-	unsigned mss;
 
 	if (fi) {
 		if (DN_FIB_RES_GW(*res) &&
 		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = DN_FIB_RES_GW(*res);
-		memcpy(rt->dst.metrics, fi->fib_metrics,
-		       sizeof(rt->dst.metrics));
+		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
 	}
 	rt->rt_type = res->type;
 
@@ -812,13 +840,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 		rt->dst.neighbour = n;
 	}
 
-	if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
-	    dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
-		rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
-	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
-	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
-	    dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
-		rt->dst.metrics[RTAX_ADVMSS-1] = mss;
+	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
+		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
+	mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+	if (mss_metric) {
+		unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
+		if (mss_metric > mss)
+			dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
+	}
 	return 0;
 }
 
@@ -836,13 +865,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
 {
 	__le16 saddr = 0;
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db;
 	struct dn_ifaddr *ifa;
 	int best_match = 0;
 	int ret;
 
-	read_lock(&dev_base_lock);
-	for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+	rcu_read_lock();
+	dn_db = rcu_dereference(dev->dn_ptr);
+	for (ifa = rcu_dereference(dn_db->ifa_list);
+	     ifa != NULL;
+	     ifa = rcu_dereference(ifa->ifa_next)) {
 		if (ifa->ifa_scope > scope)
 			continue;
 		if (!daddr) {
@@ -855,7 +887,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int
 		if (best_match == 0)
 			saddr = ifa->ifa_local;
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	return saddr;
 }
@@ -871,16 +903,16 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
 	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
 }
 
-static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
+static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
 {
-	struct flowi fl = { .nl_u = { .dn_u =
-			{ .daddr = oldflp->fld_dst,
-			  .saddr = oldflp->fld_src,
-			  .scope = RT_SCOPE_UNIVERSE,
-			 } },
-			.mark = oldflp->mark,
-			.iif = init_net.loopback_dev->ifindex,
-			.oif = oldflp->oif };
+	struct flowidn fld = {
+		.daddr = oldflp->daddr,
+		.saddr = oldflp->saddr,
+		.flowidn_scope = RT_SCOPE_UNIVERSE,
+		.flowidn_mark = oldflp->flowidn_mark,
+		.flowidn_iif = init_net.loopback_dev->ifindex,
+		.flowidn_oif = oldflp->flowidn_oif,
+	};
 	struct dn_route *rt = NULL;
 	struct net_device *dev_out = NULL, *dev;
 	struct neighbour *neigh = NULL;
@@ -894,13 +926,14 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 	if (decnet_debug_level & 16)
 		printk(KERN_DEBUG
 		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
-		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->fld_dst),
-		       le16_to_cpu(oldflp->fld_src),
-		       oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif);
+		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
+		       le16_to_cpu(oldflp->saddr),
+		       oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
+		       oldflp->flowidn_oif);
 
 	/* If we have an output interface, verify its a DECnet device */
-	if (oldflp->oif) {
-		dev_out = dev_get_by_index(&init_net, oldflp->oif);
+	if (oldflp->flowidn_oif) {
+		dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
 		err = -ENODEV;
 		if (dev_out && dev_out->dn_ptr == NULL) {
 			dev_put(dev_out);
@@ -911,11 +944,11 @@
 	}
 
 	/* If we have a source address, verify that its a local address */
-	if (oldflp->fld_src) {
+	if (oldflp->saddr) {
 		err = -EADDRNOTAVAIL;
 
 		if (dev_out) {
-			if (dn_dev_islocal(dev_out, oldflp->fld_src))
+			if (dn_dev_islocal(dev_out, oldflp->saddr))
 				goto source_ok;
 			dev_put(dev_out);
 			goto out;
@@ -924,11 +957,11 @@
 		for_each_netdev_rcu(&init_net, dev) {
 			if (!dev->dn_ptr)
 				continue;
-			if (!dn_dev_islocal(dev, oldflp->fld_src))
+			if (!dn_dev_islocal(dev, oldflp->saddr))
 				continue;
 			if ((dev->flags & IFF_LOOPBACK) &&
-			    oldflp->fld_dst &&
-			    !dn_dev_islocal(dev, oldflp->fld_dst))
+			    oldflp->daddr &&
+			    !dn_dev_islocal(dev, oldflp->daddr))
 				continue;
 
 			dev_out = dev;
@@ -943,22 +976,22 @@ source_ok:
 	}
 
 	/* No destination? Assume its local */
-	if (!fl.fld_dst) {
-		fl.fld_dst = fl.fld_src;
+	if (!fld.daddr) {
+		fld.daddr = fld.saddr;
 
 		err = -EADDRNOTAVAIL;
 		if (dev_out)
 			dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
-		if (!fl.fld_dst) {
-			fl.fld_dst =
-			fl.fld_src = dnet_select_source(dev_out, 0,
+		if (!fld.daddr) {
+			fld.daddr =
+			fld.saddr = dnet_select_source(dev_out, 0,
 						       RT_SCOPE_HOST);
-			if (!fl.fld_dst)
+			if (!fld.daddr)
 				goto out;
 		}
-		fl.oif = init_net.loopback_dev->ifindex;
+		fld.flowidn_oif = init_net.loopback_dev->ifindex;
 		res.type = RTN_LOCAL;
 		goto make_route;
 	}
@@ -967,8 +1000,8 @@ source_ok:
 		printk(KERN_DEBUG
 		       "dn_route_output_slow: initial checks complete."
 		       " dst=%o4x src=%04x oif=%d try_hard=%d\n",
-		       le16_to_cpu(fl.fld_dst), le16_to_cpu(fl.fld_src),
-		       fl.oif, try_hard);
+		       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
+		       fld.flowidn_oif, try_hard);
 
 	/*
 	 * N.B. If the kernel is compiled without router support then
@@ -976,7 +1009,7 @@ source_ok:
 	 * will always be executed.
 	 */
 	err = -ESRCH;
-	if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
+	if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
 		struct dn_dev *dn_db;
 		if (err != -ESRCH)
 			goto out;
@@ -991,19 +1024,19 @@ source_ok:
 		 * here
 		 */
 		if (!try_hard) {
-			neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst);
+			neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
 			if (neigh) {
-				if ((oldflp->oif &&
-				    (neigh->dev->ifindex != oldflp->oif)) ||
-				    (oldflp->fld_src &&
+				if ((oldflp->flowidn_oif &&
+				    (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
+				    (oldflp->saddr &&
 				    (!dn_dev_islocal(neigh->dev,
-							oldflp->fld_src)))) {
+							oldflp->saddr)))) {
 					neigh_release(neigh);
 					neigh = NULL;
 				} else {
 					if (dev_out)
 						dev_put(dev_out);
-					if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
+					if (dn_dev_islocal(neigh->dev, fld.daddr)) {
 						dev_out = init_net.loopback_dev;
 						res.type = RTN_LOCAL;
 					} else {
@@ -1021,9 +1054,9 @@ source_ok:
 		err = -ENODEV;
 		if (dev_out == NULL)
 			goto out;
-		dn_db = dev_out->dn_ptr;
+		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
 		/* Possible improvement - check all devices for local addr */
-		if (dn_dev_islocal(dev_out, fl.fld_dst)) {
+		if (dn_dev_islocal(dev_out, fld.daddr)) {
 			dev_put(dev_out);
 			dev_out = init_net.loopback_dev;
 			dev_hold(dev_out);
@@ -1039,16 +1072,16 @@ select_source:
 		if (neigh)
 			gateway = ((struct dn_neigh *)neigh)->addr;
 		if (gateway == 0)
-			gateway = fl.fld_dst;
-		if (fl.fld_src == 0) {
-			fl.fld_src = dnet_select_source(dev_out, gateway,
+			gateway = fld.daddr;
+		if (fld.saddr == 0) {
+			fld.saddr = dnet_select_source(dev_out, gateway,
						 res.type == RTN_LOCAL ?
						 RT_SCOPE_HOST :
						 RT_SCOPE_LINK);
-			if (fl.fld_src == 0 && res.type != RTN_LOCAL)
+			if (fld.saddr == 0 && res.type != RTN_LOCAL)
 				goto e_addr;
 		}
-		fl.oif = dev_out->ifindex;
+		fld.flowidn_oif = dev_out->ifindex;
 		goto make_route;
 	}
 	free_res = 1;
@@ -1057,64 +1090,60 @@ select_source:
 		goto e_inval;
 
 	if (res.type == RTN_LOCAL) {
-		if (!fl.fld_src)
-			fl.fld_src = fl.fld_dst;
+		if (!fld.saddr)
+			fld.saddr = fld.daddr;
 		if (dev_out)
 			dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
-		fl.oif = dev_out->ifindex;
+		fld.flowidn_oif = dev_out->ifindex;
 		if (res.fi)
 			dn_fib_info_put(res.fi);
 		res.fi = NULL;
 		goto make_route;
 	}
 
-	if (res.fi->fib_nhs > 1 && fl.oif == 0)
-		dn_fib_select_multipath(&fl, &res);
+	if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
+		dn_fib_select_multipath(&fld, &res);
 
 	/*
 	 * We could add some logic to deal with default routes here and
 	 * get rid of some of the special casing above.
 	 */
 
-	if (!fl.fld_src)
-		fl.fld_src = DN_FIB_RES_PREFSRC(res);
+	if (!fld.saddr)
+		fld.saddr = DN_FIB_RES_PREFSRC(res);
 
 	if (dev_out)
 		dev_put(dev_out);
 	dev_out = DN_FIB_RES_DEV(res);
 	dev_hold(dev_out);
-	fl.oif = dev_out->ifindex;
+	fld.flowidn_oif = dev_out->ifindex;
 	gateway = DN_FIB_RES_GW(res);
 
 make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
-	atomic_set(&rt->dst.__refcnt, 1);
-	rt->dst.flags = DST_HOST;
+	memset(&rt->fld, 0, sizeof(rt->fld));
+	rt->fld.saddr = oldflp->saddr;
+	rt->fld.daddr = oldflp->daddr;
+	rt->fld.flowidn_oif = oldflp->flowidn_oif;
+	rt->fld.flowidn_iif = 0;
+	rt->fld.flowidn_mark = oldflp->flowidn_mark;
 
-	rt->fl.fld_src = oldflp->fld_src;
-	rt->fl.fld_dst = oldflp->fld_dst;
-	rt->fl.oif = oldflp->oif;
-	rt->fl.iif = 0;
-	rt->fl.mark = oldflp->mark;
+	rt->rt_saddr = fld.saddr;
+	rt->rt_daddr = fld.daddr;
+	rt->rt_gateway = gateway ? gateway : fld.daddr;
+	rt->rt_local_src = fld.saddr;
 
-	rt->rt_saddr = fl.fld_src;
-	rt->rt_daddr = fl.fld_dst;
-	rt->rt_gateway = gateway ? gateway : fl.fld_dst;
-	rt->rt_local_src = fl.fld_src;
-	rt->rt_dst_map = fl.fld_dst;
-	rt->rt_src_map = fl.fld_src;
-
-
-	rt->dst.dev = dev_out;
-	dev_hold(dev_out);
+	rt->rt_dst_map = fld.daddr;
+	rt->rt_src_map = fld.saddr;
+
 	rt->dst.neighbour = neigh;
 	neigh = NULL;
 
@@ -1129,7 +1158,7 @@ make_route:
 	if (err)
 		goto e_neighbour;
 
-	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
+	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
 	dn_insert_route(rt, hash, (struct dn_route **)pprt);
 
 done:
@@ -1160,20 +1189,20 @@ e_neighbour:
 /*
  * N.B. The flags may be moved into the flowi at some future stage.
  */
-static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
+static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
 {
-	unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
+	unsigned hash = dn_hash(flp->saddr, flp->daddr);
 	struct dn_route *rt = NULL;
 
 	if (!(flags & MSG_TRYHARD)) {
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
 		     rt = rcu_dereference_bh(rt->dst.dn_next)) {
-			if ((flp->fld_dst == rt->fl.fld_dst) &&
-			    (flp->fld_src == rt->fl.fld_src) &&
-			    (flp->mark == rt->fl.mark) &&
-			    (rt->fl.iif == 0) &&
-			    (rt->fl.oif == flp->oif)) {
+			if ((flp->daddr == rt->fld.daddr) &&
+			    (flp->saddr == rt->fld.saddr) &&
+			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
+			    dn_is_output_route(rt) &&
+			    (rt->fld.flowidn_oif == flp->flowidn_oif)) {
 				dst_use(&rt->dst, jiffies);
 				rcu_read_unlock_bh();
 				*pprt = &rt->dst;
@@ -1186,25 +1215,36 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
 	return dn_route_output_slow(pprt, flp, flags);
 }
 
-static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
+static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
 {
 	int err;
 
 	err = __dn_route_output_key(pprt, flp, flags);
-	if (err == 0 && flp->proto) {
-		err = xfrm_lookup(&init_net, pprt, flp, NULL, 0);
+	if (err == 0 && flp->flowidn_proto) {
+		*pprt = xfrm_lookup(&init_net, *pprt,
+				    flowidn_to_flowi(flp), NULL, 0);
+		if (IS_ERR(*pprt)) {
+			err = PTR_ERR(*pprt);
+			*pprt = NULL;
+		}
 	}
 	return err;
 }
 
-int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
+int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags)
 {
 	int err;
 
 	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
-	if (err == 0 && fl->proto) {
-		err = xfrm_lookup(&init_net, pprt, fl, sk,
-			(flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
+	if (err == 0 && fl->flowidn_proto) {
+		if (!(flags & MSG_DONTWAIT))
+			fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
+		*pprt = xfrm_lookup(&init_net, *pprt,
+				    flowidn_to_flowi(fl), sk, 0);
+		if (IS_ERR(*pprt)) {
+			err = PTR_ERR(*pprt);
+			*pprt = NULL;
+		}
 	}
 	return err;
 }
@@ -1221,24 +1261,24 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	int flags = 0;
 	__le16 gateway = 0;
 	__le16 local_src = 0;
-	struct flowi fl = { .nl_u = { .dn_u =
-			     { .daddr = cb->dst,
-			       .saddr = cb->src,
-			       .scope = RT_SCOPE_UNIVERSE,
-			    } },
-			    .mark = skb->mark,
-			    .iif = skb->dev->ifindex };
+	struct flowidn fld = {
+		.daddr = cb->dst,
+		.saddr = cb->src,
+		.flowidn_scope = RT_SCOPE_UNIVERSE,
+		.flowidn_mark = skb->mark,
+		.flowidn_iif = skb->dev->ifindex,
+	};
 	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
 	int err = -EINVAL;
 	int free_res = 0;
 
 	dev_hold(in_dev);
 
-	if ((dn_db = in_dev->dn_ptr) == NULL)
+	if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
 		goto out;
 
 	/* Zero source addresses are not allowed */
-	if (fl.fld_src == 0)
+	if (fld.saddr == 0)
 		goto out;
 
 	/*
@@ -1252,7 +1292,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	if (dn_dev_islocal(in_dev, cb->src))
 		goto out;
 
-	err = dn_fib_lookup(&fl, &res);
+	err = dn_fib_lookup(&fld, &res);
 	if (err) {
 		if (err != -ESRCH)
 			goto out;
@@ -1264,7 +1304,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 
 		res.type = RTN_LOCAL;
 	} else {
-		__le16 src_map = fl.fld_src;
+		__le16 src_map = fld.saddr;
 		free_res = 1;
 
 		out_dev = DN_FIB_RES_DEV(res);
@@ -1277,22 +1317,22 @@ static int dn_route_input_slow(struct sk_buff *skb)
 		dev_hold(out_dev);
 
 		if (res.r)
-			src_map = fl.fld_src; /* no NAT support for now */
+			src_map = fld.saddr; /* no NAT support for now */
 
 		gateway = DN_FIB_RES_GW(res);
 		if (res.type == RTN_NAT) {
-			fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
+			fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
 			dn_fib_res_put(&res);
 			free_res = 0;
-			if (dn_fib_lookup(&fl, &res))
+			if (dn_fib_lookup(&fld, &res))
 				goto e_inval;
 			free_res = 1;
 			if (res.type != RTN_UNICAST)
 				goto e_inval;
 			flags |= RTCF_DNAT;
-			gateway = fl.fld_dst;
+			gateway = fld.daddr;
 		}
-		fl.fld_src = src_map;
+		fld.saddr = src_map;
 	}
 
 	switch(res.type) {
@@ -1306,8 +1346,8 @@ static int dn_route_input_slow(struct sk_buff *skb)
 		if (dn_db->parms.forwarding == 0)
 			goto e_inval;
 
-		if (res.fi->fib_nhs > 1 && fl.oif == 0)
-			dn_fib_select_multipath(&fl, &res);
+		if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
+			dn_fib_select_multipath(&fld, &res);
 
 		/*
 		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
@@ -1325,8 +1365,8 @@ static int dn_route_input_slow(struct sk_buff *skb)
 		break;
 	case RTN_LOCAL:
 		flags |= RTCF_LOCAL;
-		fl.fld_src = cb->dst;
-		fl.fld_dst = cb->src;
+		fld.saddr = cb->dst;
+		fld.daddr = cb->src;
 
 		/* Routing tables gave us a gateway */
 		if (gateway)
@@ -1355,29 +1395,28 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	}
 
 make_route:
-	rt = dst_alloc(&dn_dst_ops);
+	rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
-	rt->rt_saddr = fl.fld_src;
-	rt->rt_daddr = fl.fld_dst;
-	rt->rt_gateway = fl.fld_dst;
+	memset(&rt->fld, 0, sizeof(rt->fld));
+	rt->rt_saddr = fld.saddr;
+	rt->rt_daddr = fld.daddr;
+	rt->rt_gateway = fld.daddr;
 	if (gateway)
 		rt->rt_gateway = gateway;
 	rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
 
-	rt->rt_dst_map = fl.fld_dst;
-	rt->rt_src_map = fl.fld_src;
+	rt->rt_dst_map = fld.daddr;
+	rt->rt_src_map = fld.saddr;
 
-	rt->fl.fld_src = cb->src;
-	rt->fl.fld_dst = cb->dst;
-	rt->fl.oif = 0;
-	rt->fl.iif = in_dev->ifindex;
-	rt->fl.mark = fl.mark;
+	rt->fld.saddr = cb->src;
+	rt->fld.daddr = cb->dst;
+	rt->fld.flowidn_oif = 0;
+	rt->fld.flowidn_iif = in_dev->ifindex;
+	rt->fld.flowidn_mark = fld.flowidn_mark;
 
-	rt->dst.flags = DST_HOST;
 	rt->dst.neighbour = neigh;
-	rt->dst.dev = out_dev;
 	rt->dst.lastuse = jiffies;
 	rt->dst.output = dn_rt_bug;
 	switch(res.type) {
@@ -1396,14 +1435,12 @@ make_route:
 		rt->dst.input = dst_discard;
 	}
 	rt->rt_flags = flags;
-	if (rt->dst.dev)
-		dev_hold(rt->dst.dev);
 
 	err = dn_rt_set_next_hop(rt, &res);
 	if (err)
 		goto e_neighbour;
 
-	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
+	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
 	dn_insert_route(rt, hash, &rt);
 	skb_dst_set(skb, &rt->dst);
 
@@ -1443,11 +1480,11 @@ static int dn_route_input(struct sk_buff *skb)
 	rcu_read_lock();
 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
 	    rt = rcu_dereference(rt->dst.dn_next)) {
-		if ((rt->fl.fld_src == cb->src) &&
-		    (rt->fl.fld_dst == cb->dst) &&
-		    (rt->fl.oif == 0) &&
-		    (rt->fl.mark == skb->mark) &&
-		    (rt->fl.iif == cb->iif)) {
+		if ((rt->fld.saddr == cb->src) &&
+		    (rt->fld.daddr == cb->dst) &&
+		    (rt->fld.flowidn_oif == 0) &&
+		    (rt->fld.flowidn_mark == skb->mark) &&
+		    (rt->fld.flowidn_iif == cb->iif)) {
 			dst_use(&rt->dst, jiffies);
 			rcu_read_unlock();
 			skb_dst_set(skb, (struct dst_entry *)rt);
@@ -1483,9 +1520,9 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
 	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
-	if (rt->fl.fld_src) {
+	if (rt->fld.saddr) {
 		r->rtm_src_len = 16;
-		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
+		RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr);
 	}
 	if (rt->dst.dev)
 		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
@@ -1497,14 +1534,14 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 		RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
 	if (rt->rt_daddr != rt->rt_gateway)
 		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
-	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto rtattr_failure;
 	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
 			       rt->dst.error) < 0)
 		goto rtattr_failure;
-	if (rt->fl.iif)
-		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
+	if (dn_is_input_route(rt))
+		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);
 
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 	return skb->len;
@@ -1527,13 +1564,13 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
 	struct dn_skb_cb *cb;
 	int err;
 	struct sk_buff *skb;
-	struct flowi fl;
+	struct flowidn fld;
 
 	if (!net_eq(net, &init_net))
 		return -EINVAL;
 
-	memset(&fl, 0, sizeof(fl));
-	fl.proto = DNPROTO_NSP;
+	memset(&fld, 0, sizeof(fld));
+	fld.flowidn_proto = DNPROTO_NSP;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (skb == NULL)
@@ -1542,15 +1579,15 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
 	cb = DN_SKB_CB(skb);
 
 	if (rta[RTA_SRC-1])
-		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
+		memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2);
 	if (rta[RTA_DST-1])
-		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
+		memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2);
 	if (rta[RTA_IIF-1])
-		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
+		memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
 
-	if (fl.iif) {
+	if (fld.flowidn_iif) {
 		struct net_device *dev;
-		if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
+		if ((dev = dev_get_by_index(&init_net, fld.flowidn_iif)) == NULL) {
 			kfree_skb(skb);
 			return -ENODEV;
 		}
@@ -1561,8 +1598,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
 		}
 		skb->protocol = htons(ETH_P_DNA_RT);
 		skb->dev = dev;
-		cb->src = fl.fld_src;
-		cb->dst = fl.fld_dst;
+		cb->src = fld.saddr;
+		cb->dst = fld.daddr;
 		local_bh_disable();
 		err = dn_route_input(skb);
 		local_bh_enable();
@@ -1574,8 +1611,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
 		int oif = 0;
 		if (rta[RTA_OIF - 1])
 			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
-		fl.oif = oif;
-		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
+		fld.flowidn_oif = oif;
+		err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
 	}
 
 	if (skb->dev)
@@ -1678,15 +1715,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
 	struct dn_rt_cache_iter_state *s = seq->private;
 
-	rt = rt->dst.dn_next;
-	while(!rt) {
+	rt = rcu_dereference_bh(rt->dst.dn_next);
+	while (!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
 			break;
 		rcu_read_lock_bh();
-		rt = dn_rt_hash_table[s->bucket].chain;
+		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
 	}
-	return rcu_dereference_bh(rt);
+	return rt;
 }
 
 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1758,6 +1795,7 @@ void __init dn_route_init(void)
 	dn_dst_ops.kmem_cachep =
 		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
 				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	dst_entries_init(&dn_dst_ops);
 	setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
 	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
 	add_timer(&dn_route_timer);
@@ -1816,5 +1854,6 @@ void __exit dn_route_cleanup(void)
 	dn_run_flush(0);
 
 	proc_net_remove(&init_net, "decnet_cache");
+	dst_entries_destroy(&dn_dst_ops);
 }
 