Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	238
1 file changed, 70 insertions, 168 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..b715a55cccc4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1107,6 +1088,11 @@ int dev_open(struct net_device *dev)
 	dev->flags |= IFF_UP;
 
 	/*
+	 *	Enable NET_DMA
+	 */
+	dmaengine_get();
+
+	/*
 	 *	Initialize multicasting status
 	 */
 	dev_set_rx_mode(dev);
@@ -1183,6 +1169,11 @@ int dev_close(struct net_device *dev)
 	 */
 	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
+	/*
+	 *	Shutdown NET_DMA
+	 */
+	dmaengine_put();
+
 	return 0;
 }
 
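The two hunks above are the visible half of the dmaengine reference-count rework: instead of registering a dma_client at boot, the net stack now takes a module-level reference on every dev_open() and drops it on every dev_close(). A minimal sketch of the same pairing in a hypothetical consumer (only dmaengine_get()/dmaengine_put() are real API here; the function names are invented):

#include <linux/dmaengine.h>

/* Sketch only: bracket the period during which offload channels may be
 * used, the way dev_open()/dev_close() now do.  my_start()/my_stop()
 * are illustrative names, not part of this patch. */
static int my_start(void)
{
	dmaengine_get();	/* pin DMA channels while active */
	return 0;
}

static void my_stop(void)
{
	dmaengine_put();	/* must balance the dmaengine_get() */
}
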
@@ -2387,7 +2378,7 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2417,11 +2408,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 	for (p = napi->gro_list; p; p = p->next) {
 		count++;
-		NAPI_GRO_CB(p)->same_flow =
-			p->mac_len == mac_len &&
-			!memcmp(skb_mac_header(p), skb_mac_header(skb),
-				mac_len);
-		NAPI_GRO_CB(p)->flush = 0;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		if (p->mac_len != mac_len ||
+		    memcmp(skb_mac_header(p), skb_mac_header(skb),
+			   mac_len))
+			NAPI_GRO_CB(p)->same_flow = 0;
 	}
 
 	pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2463,6 +2457,19 @@ ok:
 normal:
 	return -1;
 }
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = 1;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+}
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
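Renaming __napi_gro_receive() to dev_gro_receive() and exporting it splits the receive path in two: the new __napi_gro_receive() wrapper only primes same_flow and flush on each held packet, then delegates the actual matching and merging. That lets an outside caller (the VLAN GRO path is the intended consumer) do its own priming before calling dev_gro_receive() directly. A hedged sketch of such a caller; my_gro_receive() is invented, and the -1/1 return handling mirrors the switch in napi_gro_frags() further down:

/* Sketch of an external caller of the newly exported dev_gro_receive().
 * Assumes the same return convention the in-tree callers handle:
 * -1 means GRO declined the packet, 1 means it was merged away. */
static int my_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	/* prime the compare flags, as __napi_gro_receive() now does */
	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow = 1;
		NAPI_GRO_CB(p)->flush = 0;
	}

	switch (dev_gro_receive(napi, skb)) {
	case -1:
		return netif_receive_skb(skb);	/* declined: normal path */
	case 1:
		kfree_skb(skb);			/* merged into a held skb */
		break;
	}

	return NET_RX_SUCCESS;
}
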
@@ -2479,11 +2486,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+	skb_shinfo(skb)->nr_frags = 0;
+
+	skb->len -= skb->data_len;
+	skb->truesize -= skb->data_len;
+	skb->data_len = 0;
+
+	__skb_pull(skb, skb_headlen(skb));
+	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+	napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+				  struct napi_gro_fraginfo *info)
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
-	int err = NET_RX_DROP;
 
 	napi->skb = NULL;
 
@@ -2503,16 +2525,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 	skb->len += info->len;
 	skb->truesize += info->len;
 
-	if (!pskb_may_pull(skb, ETH_HLEN))
-		goto reuse;
-
-	err = NET_RX_SUCCESS;
+	if (!pskb_may_pull(skb, ETH_HLEN)) {
+		napi_reuse_skb(napi, skb);
+		goto out;
+	}
 
 	skb->protocol = eth_type_trans(skb, dev);
 
 	skb->ip_summed = info->ip_summed;
 	skb->csum = info->csum;
 
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
 	switch (__napi_gro_receive(napi, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -2521,17 +2558,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 		goto out;
 	}
 
-reuse:
-	skb_shinfo(skb)->nr_frags = 0;
-
-	skb->len -= skb->data_len;
-	skb->truesize -= skb->data_len;
-	skb->data_len = 0;
-
-	__skb_pull(skb, skb_headlen(skb));
-	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
-
-	napi->skb = skb;
+	napi_reuse_skb(napi, skb);
 
 out:
 	return err;
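The frag path gets the same treatment: napi_fraginfo_skb() turns a driver's napi_gro_fraginfo into an skb (or returns NULL), and napi_reuse_skb() hands a rejected skb back to the NAPI context so the next poll can reuse it instead of reallocating. A sketch of how another consumer of the two exported helpers might combine them; the my_gro_frags() name and the ETH_P_IP filter are invented, while the NULL and reuse paths mirror the rewritten napi_gro_frags() above:

static int my_gro_frags(struct napi_struct *napi,
			struct napi_gro_fraginfo *info)
{
	struct sk_buff *skb = napi_fraginfo_skb(napi, info);

	if (!skb)
		return NET_RX_DROP;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* a flow we don't aggregate: recycle the skb for reuse */
		napi_reuse_skb(napi, skb);
		return NET_RX_DROP;
	}

	return netif_receive_skb(skb);	/* hand it up the stack unmerged */
}
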
@@ -2718,14 +2745,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
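With channel bookkeeping gone from net/core, net_rx_action() no longer knows which channels it owns; dma_issue_pending_all() in the dmaengine core walks every registered channel itself. Conceptually the core now performs the equivalent of the removed loop, along these lines (a paraphrase for orientation, not the actual drivers/dma/dmaengine.c source):

/* Paraphrase of the work dma_issue_pending_all() does on the caller's
 * behalf: kick the hardware on every channel that has active users.
 * sketch_device_list stands in for the core's private device list. */
static LIST_HEAD(sketch_device_list);

static void sketch_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &sketch_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				chan->device->device_issue_pending(chan);
	rcu_read_unlock();
}
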
@@ -4916,122 +4936,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-		GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-			"netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
-
 
 /**
  * netdev_increment_features - increment feature set by one
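Everything deleted in this hunk (the channel table, the rebalance pass, the dma_client event callback) is subsumed by the dmaengine core, which now maintains its own per-CPU channel tables. Consumers stop reading per_cpu(softnet_data).net_dma and instead ask for a channel on demand. A sketch of the replacement lookup pattern; dma_find_channel() and dma_async_memcpy_issue_pending() are real dmaengine calls from this era, but the surrounding function is invented:

#include <linux/dmaengine.h>

/* Sketch: on-demand channel lookup replacing the removed per-CPU
 * net_dma pointers.  my_push_copies() is illustrative. */
static void my_push_copies(void)
{
	struct dma_chan *chan;

	/* the core picks a memcpy-capable channel for this CPU;
	 * NULL means no engine is available, so fall back to the CPU */
	chan = dma_find_channel(DMA_MEMCPY);
	if (chan)
		dma_async_memcpy_issue_pending(chan);
}
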
@@ -5251,8 +5155,6 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 