Diffstat (limited to 'net/decnet/dn_route.c')
 net/decnet/dn_route.c | 204
 1 file changed, 102 insertions(+), 102 deletions(-)
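
The substantive change in this diff replaces accesses of the form rt->u.rt_next with rt->u.dst.dn_next: the DECnet route-cache chain pointer is now reached through the dst_entry embedded in struct dn_route instead of a DECnet-private union member. A rough sketch of the layout this rename assumes (field names taken from the diff; everything else elided and illustrative, not the full kernel definitions):

struct dn_route;

struct dst_entry {
	/* ... device, refcount, metrics, etc. elided ... */
	union {
		struct dst_entry *next;
		struct dn_route	 *dn_next;	/* DECnet route cache chain */
	};
};

struct dn_route {
	union {
		struct dst_entry dst;	/* chain now reached as rt->u.dst.dn_next */
	} u;
	/* ... flow key (rt->fl), DECnet addresses, etc. elided ... */
};
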
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9881933167bd..efccc42ff1c6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -43,7 +43,7 @@
 
 /******************************************************************************
     (c) 1995-1998 E.M. Serrat		emserrat@geocities.com
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
@@ -167,11 +167,11 @@ static void dn_dst_check_expire(unsigned long dummy)
 	while((rt=*rtp) != NULL) {
 		if (atomic_read(&rt->u.dst.__refcnt) ||
 				(now - rt->u.dst.lastuse) < expire) {
-			rtp = &rt->u.rt_next;
+			rtp = &rt->u.dst.dn_next;
 			continue;
 		}
-		*rtp = rt->u.rt_next;
-		rt->u.rt_next = NULL;
+		*rtp = rt->u.dst.dn_next;
+		rt->u.dst.dn_next = NULL;
 		dnrt_free(rt);
 	}
 	spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,11 +198,11 @@ static int dn_dst_gc(void)
 	while((rt=*rtp) != NULL) {
 		if (atomic_read(&rt->u.dst.__refcnt) ||
 				(now - rt->u.dst.lastuse) < expire) {
-			rtp = &rt->u.rt_next;
+			rtp = &rt->u.dst.dn_next;
 			continue;
 		}
-		*rtp = rt->u.rt_next;
-		rt->u.rt_next = NULL;
+		*rtp = rt->u.dst.dn_next;
+		rt->u.dst.dn_next = NULL;
 		dnrt_drop(rt);
 		break;
 	}
@@ -246,7 +246,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 	}
 }
 
 /*
  * When a route has been marked obsolete. (e.g. routing cache flush)
  */
 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
@@ -286,8 +286,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 	while((rth = *rthp) != NULL) {
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
-			*rthp = rth->u.rt_next;
-			rcu_assign_pointer(rth->u.rt_next,
+			*rthp = rth->u.dst.dn_next;
+			rcu_assign_pointer(rth->u.dst.dn_next,
 					   dn_rt_hash_table[hash].chain);
 			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
@@ -300,12 +300,12 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 			*rp = rth;
 			return 0;
 		}
-		rthp = &rth->u.rt_next;
+		rthp = &rth->u.dst.dn_next;
 	}
 
-	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
 	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
 
 	dst_hold(&rt->u.dst);
 	rt->u.dst.__use++;
 	rt->u.dst.lastuse = now;
@@ -326,8 +326,8 @@ void dn_run_flush(unsigned long dummy)
 		goto nothing_to_declare;
 
 	for(; rt; rt=next) {
-		next = rt->u.rt_next;
-		rt->u.rt_next = NULL;
+		next = rt->u.dst.dn_next;
+		rt->u.dst.dn_next = NULL;
 		dst_free((struct dst_entry *)rt);
 	}
 
@@ -506,23 +506,23 @@ static int dn_route_rx_long(struct sk_buff *skb)
 	skb_pull(skb, 20);
 	skb->h.raw = skb->data;
 
 	/* Destination info */
 	ptr += 2;
 	cb->dst = dn_eth2dn(ptr);
 	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
 		goto drop_it;
 	ptr += 6;
 
 
 	/* Source info */
 	ptr += 2;
 	cb->src = dn_eth2dn(ptr);
 	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
 		goto drop_it;
 	ptr += 6;
 	/* Other junk */
 	ptr++;
 	cb->hops = *ptr++; /* Visit Count */
 
 	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
 
@@ -545,16 +545,16 @@ static int dn_route_rx_short(struct sk_buff *skb)
 	skb->h.raw = skb->data;
 
 	cb->dst = *(__le16 *)ptr;
 	ptr += 2;
 	cb->src = *(__le16 *)ptr;
 	ptr += 2;
 	cb->hops = *ptr & 0x3f;
 
 	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
 
 drop_it:
 	kfree_skb(skb);
 	return NET_RX_DROP;
 }
 
 static int dn_route_discard(struct sk_buff *skb)
@@ -626,20 +626,20 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 	cb->rt_flags = flags;
 
 	if (decnet_debug_level & 1)
 		printk(KERN_DEBUG
 			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
 			(int)flags, (dev) ? dev->name : "???", len, skb->len,
 			padlen);
 
 	if (flags & DN_RT_PKT_CNTL) {
 		if (unlikely(skb_linearize(skb)))
 			goto dump_it;
 
 		switch(flags & DN_RT_CNTL_MSK) {
 		case DN_RT_PKT_INIT:
 			dn_dev_init_pkt(skb);
 			break;
 		case DN_RT_PKT_VERI:
 			dn_dev_veri_pkt(skb);
 			break;
 		}
@@ -648,31 +648,31 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 			goto dump_it;
 
 		switch(flags & DN_RT_CNTL_MSK) {
 		case DN_RT_PKT_HELO:
 			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
 
 		case DN_RT_PKT_L1RT:
 		case DN_RT_PKT_L2RT:
 			return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
 		case DN_RT_PKT_ERTH:
 			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
 
 		case DN_RT_PKT_EEDH:
 			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
 		}
 	} else {
 		if (dn->parms.state != DN_DEV_S_RU)
 			goto dump_it;
 
 		skb_pull(skb, 1); /* Pull flags */
 
 		switch(flags & DN_RT_PKT_MSK) {
 		case DN_RT_PKT_LONG:
 			return dn_route_rx_long(skb);
 		case DN_RT_PKT_SHORT:
 			return dn_route_rx_short(skb);
 		}
 	}
 
 dump_it:
 	kfree_skb(skb);
@@ -815,8 +815,8 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 		rt->u.dst.neighbour = n;
 	}
 
 	if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
 	    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
 		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
 	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
 	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
@@ -876,7 +876,7 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
 
 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
 {
 	struct flowi fl = { .nl_u = { .dn_u =
 			    { .daddr = oldflp->fld_dst,
 			      .saddr = oldflp->fld_src,
 			      .scope = RT_SCOPE_UNIVERSE,
@@ -899,7 +899,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
 		       " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst),
 		       dn_ntohs(oldflp->fld_src),
 		       oldflp->mark, loopback_dev.ifindex, oldflp->oif);
 
 	/* If we have an output interface, verify its a DECnet device */
 	if (oldflp->oif) {
@@ -982,19 +982,19 @@ source_ok:
 	if (err != -ESRCH)
 		goto out;
 	/*
 	 * Here the fallback is basically the standard algorithm for
 	 * routing in endnodes which is described in the DECnet routing
 	 * docs
 	 *
 	 * If we are not trying hard, look in neighbour cache.
 	 * The result is tested to ensure that if a specific output
 	 * device/source address was requested, then we honour that
 	 * here
 	 */
 	if (!try_hard) {
 		neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
 		if (neigh) {
 			if ((oldflp->oif &&
 			    (neigh->dev->ifindex != oldflp->oif)) ||
 			    (oldflp->fld_src &&
 			    (!dn_dev_islocal(neigh->dev,
@@ -1044,7 +1044,7 @@ select_source:
 	if (fl.fld_src == 0) {
 		fl.fld_src = dnet_select_source(dev_out, gateway,
 						res.type == RTN_LOCAL ?
 						RT_SCOPE_HOST :
 						RT_SCOPE_LINK);
 		if (fl.fld_src == 0 && res.type != RTN_LOCAL)
 			goto e_addr;
@@ -1074,14 +1074,14 @@ select_source:
 	if (res.fi->fib_nhs > 1 && fl.oif == 0)
 		dn_fib_select_multipath(&fl, &res);
 
 	/*
 	 * We could add some logic to deal with default routes here and
 	 * get rid of some of the special casing above.
 	 */
 
 	if (!fl.fld_src)
 		fl.fld_src = DN_FIB_RES_PREFSRC(res);
 
 	if (dev_out)
 		dev_put(dev_out);
 	dev_out = DN_FIB_RES_DEV(res);
@@ -1144,8 +1144,8 @@ out:
 	return err;
 
 e_addr:
 	err = -EADDRNOTAVAIL;
 	goto done;
 e_inval:
 	err = -EINVAL;
 	goto done;
@@ -1169,7 +1169,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
 	if (!(flags & MSG_TRYHARD)) {
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
-			rt = rcu_dereference(rt->u.rt_next)) {
+			rt = rcu_dereference(rt->u.dst.dn_next)) {
 			if ((flp->fld_dst == rt->fl.fld_dst) &&
 			    (flp->fld_src == rt->fl.fld_src) &&
 			    (flp->mark == rt->fl.mark) &&
@@ -1223,7 +1223,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	int flags = 0;
 	__le16 gateway = 0;
 	__le16 local_src = 0;
 	struct flowi fl = { .nl_u = { .dn_u =
 			     { .daddr = cb->dst,
 			       .saddr = cb->src,
 			       .scope = RT_SCOPE_UNIVERSE,
@@ -1311,7 +1311,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	if (res.fi->fib_nhs > 1 && fl.oif == 0)
 		dn_fib_select_multipath(&fl, &res);
 
 	/*
 	 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
 	 * flag as a hint to set the intra-ethernet bit when
 	 * forwarding. If we've got NAT in operation, we don't do
@@ -1443,9 +1443,9 @@ int dn_route_input(struct sk_buff *skb)
 
 	rcu_read_lock();
 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-	    rt = rcu_dereference(rt->u.rt_next)) {
+	    rt = rcu_dereference(rt->u.dst.dn_next)) {
 		if ((rt->fl.fld_src == cb->src) &&
 		    (rt->fl.fld_dst == cb->dst) &&
 		    (rt->fl.oif == 0) &&
 		    (rt->fl.mark == skb->mark) &&
 		    (rt->fl.iif == cb->iif)) {
@@ -1514,8 +1514,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 
 nlmsg_failure:
 rtattr_failure:
 	skb_trim(skb, b - skb->data);
 	return -1;
 }
 
 /*
@@ -1627,12 +1627,12 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
 			rt;
-			rt = rcu_dereference(rt->u.rt_next), idx++) {
+			rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb->dst = dst_clone(&rt->u.dst);
 			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
 					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 					1, NLM_F_MULTI) <= 0) {
 				dst_release(xchg(&skb->dst, NULL));
 				rcu_read_unlock_bh();
@@ -1673,7 +1673,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
 	struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
 
-	rt = rt->u.rt_next;
+	rt = rt->u.dst.dn_next;
 	while(!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
@@ -1721,7 +1721,7 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
 		   rt->u.dst.__use,
 		   (int) dst_metric(&rt->u.dst, RTAX_RTT));
 	return 0;
 }
 
 static struct seq_operations dn_rt_cache_seq_ops = {
 	.start	= dn_rt_cache_seq_start,
@@ -1778,38 +1778,38 @@ void __init dn_route_init(void)
 	for(order = 0; (1UL << order) < goal; order++)
 		/* NOTHING */;
 
 	/*
 	 * Only want 1024 entries max, since the table is very, very unlikely
 	 * to be larger than that.
 	 */
 	while(order && ((((1UL << order) * PAGE_SIZE) /
 				sizeof(struct dn_rt_hash_bucket)) >= 2048))
 		order--;
 
 	do {
 		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
 			sizeof(struct dn_rt_hash_bucket);
 		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
 			dn_rt_hash_mask--;
 		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
 			__get_free_pages(GFP_ATOMIC, order);
 	} while (dn_rt_hash_table == NULL && --order > 0);
 
 	if (!dn_rt_hash_table)
 		panic("Failed to allocate DECnet route cache hash table\n");
 
 	printk(KERN_INFO
 		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
 		dn_rt_hash_mask,
 		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
 
 	dn_rt_hash_mask--;
 	for(i = 0; i <= dn_rt_hash_mask; i++) {
 		spin_lock_init(&dn_rt_hash_table[i].lock);
 		dn_rt_hash_table[i].chain = NULL;
 	}
 
 	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
 
 	proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
 }
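
As a usage note, the converted functions keep the two access patterns visible in the hunks above, only through the renamed pointer: dn_dst_check_expire()/dn_dst_gc() unlink entries with a pointer-to-pointer walk under the per-bucket lock, while dn_route_input()/__dn_route_output_key() traverse the chain under RCU. Below is a condensed, self-contained illustration of that idiom using plain C types and hypothetical names (node, unlink_expired, lookup), not the kernel API; the locking and rcu_dereference() calls are only noted in comments.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	int expired;		/* stands in for the refcount/lastuse test */
	struct node *dn_next;	/* plays the role of rt->u.dst.dn_next */
};

/* Writer side: pointer-to-pointer unlink, as in dn_dst_check_expire(). */
static void unlink_expired(struct node **chain)
{
	struct node **np = chain;	/* kernel code holds the bucket lock here */
	struct node *n;

	while ((n = *np) != NULL) {
		if (!n->expired) {
			np = &n->dn_next;	/* keep entry, advance */
			continue;
		}
		*np = n->dn_next;		/* splice entry out of the chain */
		n->dn_next = NULL;
		free(n);			/* kernel code calls dnrt_free() */
	}
}

/* Reader side: linear walk, as in dn_route_input()/__dn_route_output_key(). */
static struct node *lookup(struct node *chain, int key)
{
	struct node *n;

	/* kernel code wraps this walk in rcu_read_lock_bh()/rcu_dereference() */
	for (n = chain; n != NULL; n = n->dn_next)
		if (n->key == key)
			return n;
	return NULL;
}

int main(void)
{
	struct node *chain = NULL;

	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->key = i;
		n->expired = (i % 2);	/* mark odd keys as stale */
		n->dn_next = chain;
		chain = n;
	}

	unlink_expired(&chain);
	printf("key 2 %sfound, key 1 %sfound\n",
	       lookup(chain, 2) ? "" : "not ",
	       lookup(chain, 1) ? "" : "not ");
	return 0;
}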