author	YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>	2007-02-09 09:24:47 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-02-11 02:19:39 -0500
commit	e905a9edab7f4f14f9213b52234e4a346c690911 (patch)
tree	9e52a5f47eec47c5685c347ff7af22290a10305b /net/ipv4/route.c
parent	642656518b2e64fd59d9bbd15b6885cac5fe99b1 (diff)
[NET] IPV4: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	110
1 file changed, 55 insertions, 55 deletions
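Note: the patch is whitespace-only, so the old and new side of every hunk below carry the same visible text; the exact whitespace that changed does not survive this listing. Purely as an illustration (not a hunk taken from this patch), a fix of this kind typically replaces space indentation with a tab and removes trailing blanks:

	-        return 0;
	+	return 0;

where the removed line is indented with eight spaces and the added line with a single tab.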
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index baee304a3cb7..56d6602affb4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -20,7 +20,7 @@
 * (rco@di.uminho.pt) Routing table insertion and update
 * Linus Torvalds : Rewrote bits to be sensible
 * Alan Cox : Added BSD route gw semantics
 * Alan Cox : Super /proc >4K
 * Alan Cox : MTU in route table
 * Alan Cox : MSS actually. Also added the window
 * clamper.
@@ -38,7 +38,7 @@
 * Alan Cox : Faster /proc handling
 * Alexey Kuznetsov : Massive rework to support tree based routing,
 * routing caches and better behaviour.
 *
 * Olaf Erb : irtt wasn't being copied right.
 * Bjorn Ekwall : Kerneld route support.
 * Alan Cox : Multicast fixed (I hope)
@@ -361,8 +361,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
dev_queue_xmit) : 0,
r->rt_spec_dst);
seq_printf(seq, "%-127s\n", temp);
}
return 0;
}

static struct seq_operations rt_cache_seq_ops = {
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
@@ -445,7 +445,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
return 0;
}

seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
atomic_read(&ipv4_dst_ops.entries),
@@ -459,7 +459,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)

st->out_hit,
st->out_slow_tot,
st->out_slow_mc,

st->gc_total,
st->gc_ignored,
@@ -493,7 +493,7 @@ static struct file_operations rt_cpu_seq_fops = {
};

#endif /* CONFIG_PROC_FS */

static __inline__ void rt_free(struct rtable *rt)
{
multipath_remove(rt);
@@ -672,8 +672,8 @@ static void rt_check_expire(unsigned long dummy)
rt_free(rth);
}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
*rthp = rth->u.rt_next;
rt_free(rth);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
}
spin_unlock(rt_hash_lock_addr(i));
@@ -739,7 +739,7 @@ void rt_cache_flush(int delay)

if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
tmo = 0;

if (delay > tmo)
delay = tmo;
}
@@ -1104,7 +1104,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
return;
}
} else
printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));

ip_select_fb_ident(iph);
@@ -1190,7 +1190,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,

/* Copy all the information. */
*rt = *rth;
INIT_RCU_HEAD(&rt->u.dst.rcu_head);
rt->u.dst.__use = 1;
atomic_set(&rt->u.dst.__refcnt, 1);
rt->u.dst.child = NULL;
@@ -1225,11 +1225,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt_drop(rt);
goto do_next;
}

netevent.old = &rth->u.dst;
netevent.new = &rt->u.dst;
call_netevent_notifiers(NETEVENT_REDIRECT,
&netevent);

rt_del(hash, rth);
if (!rt_intern_hash(hash, rt, &rt))
@@ -1343,7 +1343,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
#endif
}
out:
in_dev_put(in_dev);
}

static int ip_error(struct sk_buff *skb)
@@ -1379,7 +1379,7 @@ static int ip_error(struct sk_buff *skb)

out: kfree_skb(skb);
return 0;
}

/*
 * The last two values are not from the RFC but
@@ -1392,7 +1392,7 @@ static const unsigned short mtu_plateau[] =
static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
int i;

for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
if (old_mtu > mtu_plateau[i])
return mtu_plateau[i];
@@ -1436,7 +1436,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
mtu = guess_mtu(old_mtu);
}
if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
dst_confirm(&rth->u.dst);
if (mtu < ip_rt_min_pmtu) {
mtu = ip_rt_min_pmtu;
@@ -1600,7 +1600,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
#endif
set_class_tag(rt, itag);
#endif
rt->rt_type = res->type;
}

static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -1714,11 +1714,11 @@ static void ip_handle_martian_source(struct net_device *dev,
#endif
}

static inline int __mkroute_input(struct sk_buff *skb,
struct fib_result* res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos,
struct rtable **result)
{

struct rtable *rth;
@@ -1738,12 +1738,12 @@ static inline int __mkroute_input(struct sk_buff *skb,
}


err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, &spec_dst, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);

err = -EINVAL;
goto cleanup;
}
@@ -1811,10 +1811,10 @@ static inline int __mkroute_input(struct sk_buff *skb,
/* release the working reference to the output device */
in_dev_put(out_dev);
return err;
}

static inline int ip_mkroute_input_def(struct sk_buff *skb,
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
@@ -1835,11 +1835,11 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,

/* put it into the cache */
hash = rt_hash(daddr, saddr, fl->iif);
return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
}

static inline int ip_mkroute_input(struct sk_buff *skb,
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
@@ -1859,7 +1859,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
if (hopcount < 2)
return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
saddr, tos);

/* add all alternatives to the routing cache */
for (hop = 0; hop < hopcount; hop++) {
res->nh_sel = hop;
@@ -1988,7 +1988,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto e_nobufs;
if (err == -EINVAL)
goto e_inval;

done:
in_dev_put(in_dev);
if (free_res)
@@ -2071,8 +2071,8 @@ martian_destination:
#endif

e_hostunreach:
err = -EHOSTUNREACH;
goto done;

e_inval:
err = -EINVAL;
@@ -2153,11 +2153,11 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
}

static inline int __mkroute_output(struct rtable **result,
struct fib_result* res,
const struct flowi *fl,
const struct flowi *oldflp,
struct net_device *dev_out,
unsigned flags)
{
struct rtable *rth;
struct in_device *in_dev;
@@ -2190,7 +2190,7 @@ static inline int __mkroute_output(struct rtable **result,
}
} else if (res->type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST|RTCF_LOCAL;
if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
flags &= ~RTCF_LOCAL;
/* If multicast route do not exist use
@@ -2208,7 +2208,7 @@ static inline int __mkroute_output(struct rtable **result,
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}

atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
@@ -2232,7 +2232,7 @@ static inline int __mkroute_output(struct rtable **result,
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
/* get references to the devices that are to be hold by the routing
cache entry */
rth->u.dst.dev = dev_out;
dev_hold(dev_out);
@@ -2250,7 +2250,7 @@ static inline int __mkroute_output(struct rtable **result,
}
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
rth->rt_spec_dst = fl->fl4_src;
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->u.dst.output = ip_mc_output;
RT_CACHE_STAT_INC(out_slow_mc);
@@ -2292,7 +2292,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
err = rt_intern_hash(hash, rth, rp);
}

return err;
}

@@ -2830,7 +2830,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
rcu_read_unlock_bh();
@@ -2863,7 +2863,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
rt_cache_flush(flush_delay);
return 0;
}

return -EINVAL;
}
@@ -2880,13 +2880,13 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
if (newlen != sizeof(int))
return -EINVAL;
if (get_user(delay, (int __user *)newval))
return -EFAULT;
rt_cache_flush(delay);
return 0;
}

ctl_table ipv4_route_table[] = {
{
.ctl_name = NET_IPV4_ROUTE_FLUSH,
.procname = "flush",
.data = &flush_delay,
@@ -2931,7 +2931,7 @@ ctl_table ipv4_route_table[] = {
},
{
/* Deprecated. Use gc_min_interval_ms */

.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
.procname = "gc_min_interval",
.data = &ip_rt_gc_min_interval,
@@ -3180,8 +3180,8 @@ int __init ip_rt_init(void)
{
struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
!(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
proc_net_stat))) {
return -ENOMEM;
}
rtstat_pde->proc_fops = &rt_cpu_seq_fops;