author     YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>    2007-02-09 09:24:36 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2007-02-11 02:19:25 -0500
commit     4ec93edb14fe5fdee9fae6335f2cbba204627eac (patch)
tree       b768f96942b5d19f5bc36b47262c42a8a7acdd7f /net/core/dev.c
parent     9d6f229fc45b6ac268020c0c8eff29e94bb34381 (diff)
[NET] CORE: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
 -rw-r--r--   net/core/dev.c   96
 1 file changed, 48 insertions, 48 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 1e94a1b9a0f4..85d58d799329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -255,7 +255,7 @@ static int netdev_nit
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep therefore it can not
 * guarantee all CPU's that are in middle of receiving packets
 * will see the new packet type (until the next received packet).
 */
@@ -282,7 +282,7 @@ void dev_add_pack(struct packet_type *pt)
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPU's have gone
@@ -327,7 +327,7 @@ out:
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}

@@ -607,7 +607,7 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype);
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns NULL if a device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */
@@ -802,7 +802,7 @@ void netdev_state_change(struct net_device *dev)

void dev_load(const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(name);
@@ -860,7 +860,7 @@ int dev_open(struct net_device *dev)
                clear_bit(__LINK_STATE_START, &dev->state);
        }

        /*
         *      If it went open OK then:
         */

@@ -964,7 +964,7 @@ int dev_close(struct net_device *dev)
 *      is returned on a failure.
 *
 *      When registered all registration and up events are replayed
 *      to the new notifier to allow device to have a race free
 *      view of the network device list.
 */

@@ -979,7 +979,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
                for (dev = dev_base; dev; dev = dev->next) {
                        nb->notifier_call(nb, NETDEV_REGISTER, dev);

                        if (dev->flags & IFF_UP)
                                nb->notifier_call(nb, NETDEV_UP, dev);
                }
        }
@@ -1157,7 +1157,7 @@ void netif_device_attach(struct net_device *dev)
        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
            netif_running(dev)) {
                netif_wake_queue(dev);
                __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_device_attach);
@@ -1197,7 +1197,7 @@ int skb_checksum_help(struct sk_buff *skb)

out_set_summed:
        skb->ip_summed = CHECKSUM_NONE;
out:
        return ret;
}

@@ -1258,7 +1258,7 @@ EXPORT_SYMBOL(skb_gso_segment);
void netdev_rx_csum_fault(struct net_device *dev)
{
        if (net_ratelimit()) {
                printk(KERN_ERR "%s: hw csum failure.\n",
                        dev ? dev->name : "<unknown>");
                dump_stack();
        }
@@ -1372,7 +1372,7 @@ gso:
                if (unlikely(netif_queue_stopped(dev) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);

        skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
@@ -1449,25 +1449,25 @@ int dev_queue_xmit(struct sk_buff *skb)
            (!(dev->features & NETIF_F_GEN_CSUM) &&
             (!(dev->features & NETIF_F_IP_CSUM) ||
              skb->protocol != htons(ETH_P_IP))))
                if (skb_checksum_help(skb))
                        goto out_kfree_skb;

gso:
        spin_lock_prefetch(&dev->queue_lock);

        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
        rcu_read_lock_bh();

        /* Updates of qdisc are serialized by queue_lock.
         * The struct Qdisc which is pointed to by qdisc is now a
         * rcu structure - it may be accessed without acquiring
         * a lock (but the structure may be stale.) The freeing of the
         * qdisc will be deferred until it's known that there are no
         * more references to it.
         *
         * If the qdisc has an enqueue function, we still need to
         * hold the queue_lock before calling it, since queue_lock
         * also serializes access to the device queue.
         */
@@ -1715,8 +1715,8 @@ static __inline__ int handle_bridge(struct sk_buff **pskb,
        if (*pt_prev) {
                *ret = deliver_skb(*pskb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }

        return br_handle_frame_hook(port, pskb);
}
#else
@@ -1728,16 +1728,16 @@ static __inline__ int handle_bridge(struct sk_buff **pskb,
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesnt stop any functionality; if you dont have
 * the ingress scheduler, you just cant add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
        struct Qdisc *q;
        struct net_device *dev = skb->dev;
        int result = TC_ACT_OK;

        if (dev->qdisc_ingress) {
                __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
                if (MAX_RED_LOOP < ttl++) {
@@ -1801,7 +1801,7 @@ int netif_receive_skb(struct sk_buff *skb)

        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
@@ -1833,7 +1833,7 @@ ncls:
        list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
                if (ptype->type == type &&
                    (!ptype->dev || ptype->dev == skb->dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
@@ -2061,7 +2061,7 @@ static int dev_ifconf(char __user *arg)
                                total += done;
                        }
                }
        }

        /*
         *      All done. Write the updated control block back to the caller.
@@ -2154,7 +2154,7 @@ static struct netif_rx_stats *softnet_get_online(loff_t *pos)
        struct netif_rx_stats *rc = NULL;

        while (*pos < NR_CPUS)
                if (cpu_online(*pos)) {
                        rc = &per_cpu(netdev_rx_stat, *pos);
                        break;
                } else
@@ -2282,7 +2282,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
        }

        slave->master = master;

        synchronize_net();

        if (old)
@@ -2319,13 +2319,13 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
                dev_mc_upload(dev);
                printk(KERN_INFO "device %s %s promiscuous mode\n",
                       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
                                                               "left");
                audit_log(current->audit_context, GFP_ATOMIC,
                        AUDIT_ANOM_PROMISCUOUS,
                        "dev=%s prom=%d old_prom=%d auid=%u",
                        dev->name, (dev->flags & IFF_PROMISC),
                        (old_flags & IFF_PROMISC),
                        audit_get_loginuid(current->audit_context));
        }
}

@@ -2816,7 +2816,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
                        rtnl_unlock();
                        if (IW_IS_GET(cmd) &&
                            copy_to_user(arg, &ifr,
                                         sizeof(struct ifreq)))
                                ret = -EFAULT;
                        return ret;
                }
@@ -2906,7 +2906,7 @@ int register_netdevice(struct net_device *dev)
                        goto out;
                }
        }

        if (!dev_valid_name(dev->name)) {
                ret = -EINVAL;
                goto out;
@@ -2923,9 +2923,9 @@ int register_netdevice(struct net_device *dev)
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        /* Fix illegal SG+CSUM combinations. */
        if ((dev->features & NETIF_F_SG) &&
@@ -3024,7 +3024,7 @@ int register_netdev(struct net_device *dev)
                if (err < 0)
                        goto out;
        }

        err = register_netdevice(dev);
out:
        rtnl_unlock();
@@ -3041,7 +3041,7 @@ EXPORT_SYMBOL(register_netdev);
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
@@ -3205,8 +3205,8 @@ EXPORT_SYMBOL(alloc_netdev);
 *      free_netdev - free network device
 *      @dev: device
 *
 *      This function does the last stage of destroying an allocated device
 *      interface. The reference to the device object is released.
 *      If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
@@ -3227,9 +3227,9 @@ void free_netdev(struct net_device *dev)
        kfree((char *)dev - dev->padded);
#endif
}

/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
        might_sleep();
        synchronize_rcu();
@@ -3291,12 +3291,12 @@ void unregister_netdevice(struct net_device *dev)
        /* Shutdown queueing discipline. */
        dev_shutdown(dev);


        /* Notify protocols, that we are about to destroy
           this device. They should clean all the things.
        */
        raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

        /*
         *      Flush the multicast chain
         */
@@ -3483,7 +3483,7 @@ static int __init net_dev_init(void)
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)