Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c            |  3
-rw-r--r--   net/core/net_namespace.c  | 32
-rw-r--r--   net/core/pktgen.c         | 27
-rw-r--r--   net/core/scm.c            | 24
-rw-r--r--   net/core/skbuff.c         | 12
5 files changed, 81 insertions, 17 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index d9038e328cc1..9174c77d3112 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2218,6 +2218,9 @@ int netif_receive_skb(struct sk_buff *skb)
         int ret = NET_RX_DROP;
         __be16 type;
 
+        if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+                return NET_RX_SUCCESS;
+
         /* if we've gotten here through NAPI, check netpoll */
         if (netpoll_receive_skb(skb))
                 return NET_RX_DROP;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index f1d07b5c1e17..1895a4ca9c4f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -325,6 +325,38 @@ void unregister_pernet_subsys(struct pernet_operations *module)
 }
 EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
 
+int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
+{
+        int rv;
+
+        mutex_lock(&net_mutex);
+again:
+        rv = ida_get_new_above(&net_generic_ids, 1, id);
+        if (rv < 0) {
+                if (rv == -EAGAIN) {
+                        ida_pre_get(&net_generic_ids, GFP_KERNEL);
+                        goto again;
+                }
+                goto out;
+        }
+        rv = register_pernet_operations(first_device, ops);
+        if (rv < 0)
+                ida_remove(&net_generic_ids, *id);
+        mutex_unlock(&net_mutex);
+out:
+        return rv;
+}
+EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
+
+void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
+{
+        mutex_lock(&net_mutex);
+        unregister_pernet_operations(ops);
+        ida_remove(&net_generic_ids, id);
+        mutex_unlock(&net_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);
+
 /**
  * register_pernet_device - register a network namespace device
  * @ops: pernet operations structure for the subsystem
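The pair added above is meant to be called from a subsystem's module init/exit
path; the id handed back is what the caller later passes to net_generic() to
find its per-namespace data. A minimal usage sketch, assuming a hypothetical
"foo" subsystem (foo_net_id, foo_net_ops, foo_net_init/exit and foo_init/exit
are illustrative names, not part of this patch):

static int foo_net_id;

static int __net_init foo_net_init(struct net *net)
{
        /* allocate per-net state and publish it, e.g. via net_assign_generic() */
        return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
        /* tear down the per-net state looked up via net_generic(net, foo_net_id) */
}

static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .exit = foo_net_exit,
};

static int __init foo_init(void)
{
        /* reserves a generic-pointer id and registers the ops in one step */
        return register_pernet_gen_subsys(&foo_net_id, &foo_net_ops);
}

static void __exit foo_exit(void)
{
        unregister_pernet_gen_subsys(foo_net_id, &foo_net_ops);
}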
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99f656d35b4f..a47f5bad110d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1973,28 +1973,27 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
         /* make sure that we don't pick a non-existing transmit queue */
         ntxq = pkt_dev->odev->real_num_tx_queues;
-        if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+        if (ntxq > num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
                 printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
-                       "disabled because CPU count (%d) exceeds number ",
-                       num_online_cpus());
-                printk(KERN_WARNING "pktgen: WARNING: of tx queues "
-                       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+                       "disabled because CPU count (%d) exceeds number "
+                       "of tx queues (%d) on %s\n", num_online_cpus(), ntxq,
+                       pkt_dev->odev->name);
                 pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
         }
         if (ntxq <= pkt_dev->queue_map_min) {
                 printk(KERN_WARNING "pktgen: WARNING: Requested "
-                       "queue_map_min (%d) exceeds number of tx\n",
-                       pkt_dev->queue_map_min);
-                printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
-                       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+                       "queue_map_min (zero-based) (%d) exceeds valid range "
+                       "[0 - %d] for (%d) queues on %s, resetting\n",
+                       pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
+                       pkt_dev->odev->name);
                 pkt_dev->queue_map_min = ntxq - 1;
         }
-        if (ntxq <= pkt_dev->queue_map_max) {
+        if (pkt_dev->queue_map_max >= ntxq) {
                 printk(KERN_WARNING "pktgen: WARNING: Requested "
-                       "queue_map_max (%d) exceeds number of tx\n",
-                       pkt_dev->queue_map_max);
-                printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
-                       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+                       "queue_map_max (zero-based) (%d) exceeds valid range "
+                       "[0 - %d] for (%d) queues on %s, resetting\n",
+                       pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
+                       pkt_dev->odev->name);
                 pkt_dev->queue_map_max = ntxq - 1;
         }
 
diff --git a/net/core/scm.c b/net/core/scm.c
index 10f5c65f6a47..ab242cc1acca 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -75,6 +75,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                 if (!fpl)
                         return -ENOMEM;
                 *fplp = fpl;
+                INIT_LIST_HEAD(&fpl->list);
                 fpl->count = 0;
         }
         fpp = &fpl->fp[fpl->count];
@@ -106,9 +107,25 @@ void __scm_destroy(struct scm_cookie *scm)
 
         if (fpl) {
                 scm->fp = NULL;
-                for (i=fpl->count-1; i>=0; i--)
-                        fput(fpl->fp[i]);
-                kfree(fpl);
+                if (current->scm_work_list) {
+                        list_add_tail(&fpl->list, current->scm_work_list);
+                } else {
+                        LIST_HEAD(work_list);
+
+                        current->scm_work_list = &work_list;
+
+                        list_add(&fpl->list, &work_list);
+                        while (!list_empty(&work_list)) {
+                                fpl = list_first_entry(&work_list, struct scm_fp_list, list);
+
+                                list_del(&fpl->list);
+                                for (i=fpl->count-1; i>=0; i--)
+                                        fput(fpl->fp[i]);
+                                kfree(fpl);
+                        }
+
+                        current->scm_work_list = NULL;
+                }
         }
 }
 
@@ -284,6 +301,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
 
         new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
         if (new_fpl) {
+                INIT_LIST_HEAD(&new_fpl->list);
                 for (i=fpl->count-1; i>=0; i--)
                         get_file(fpl->fp[i]);
                 memcpy(new_fpl, fpl, sizeof(*fpl));
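The scm.c change replaces direct recursion with a per-task work list: fput() on
an AF_UNIX socket's file can itself land back in __scm_destroy() (in-flight
SCM_RIGHTS descriptors may carry further sockets), so deeply nested passed-around
descriptors could previously exhaust the kernel stack. A rough sketch of the
idiom in isolation, using simplified stand-in types rather than the kernel's
(struct item, active_list and destroy() are illustrative only):

#include <stdlib.h>

struct item {
        struct item *next;      /* link on the pending work list */
        struct item *nested;    /* destroying this item exposes another */
};

/* Stands in for current->scm_work_list: non-NULL only while the
 * outermost destroy() call is draining its list. */
static struct item **active_list;

static void destroy(struct item *it)
{
        struct item *list, *cur;

        if (active_list) {              /* nested call: queue and return */
                it->next = *active_list;
                *active_list = it;
                return;
        }

        it->next = NULL;                /* outermost call: own the drain loop */
        list = it;
        active_list = &list;

        while (list) {
                cur = list;
                list = cur->next;
                if (cur->nested)
                        destroy(cur->nested);   /* just re-queues onto 'list' */
                free(cur);
        }
        active_list = NULL;
}

However deep the nesting, the stack never grows past one extra frame, which is
the same property the work-list version of __scm_destroy() provides.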
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e22e3a35359..ebb6b94f8af2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -449,6 +449,18 @@ void kfree_skb(struct sk_buff *skb)
         __kfree_skb(skb);
 }
 
+/**
+ *      skb_recycle_check - check if skb can be reused for receive
+ *      @skb: buffer
+ *      @skb_size: minimum receive buffer size
+ *
+ *      Checks that the skb passed in is not shared or cloned, and
+ *      that it is linear and its head portion at least as large as
+ *      skb_size so that it can be recycled as a receive buffer.
+ *      If these conditions are met, this function does any necessary
+ *      reference count dropping and cleans up the skbuff as if it
+ *      just came from __alloc_skb().
+ */
 int skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
         struct skb_shared_info *shinfo;
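skb_recycle_check() lets a driver keep a just-transmitted buffer around for the
receive path instead of freeing and re-allocating it. A hedged sketch of how a
driver's TX-completion path might use it (struct foo_priv, rx_buffer_size and
the rx_recycle queue are illustrative assumptions, not part of this patch):

/* In a hypothetical driver's TX-completion handler: try to recycle the
 * skb as a future RX buffer; otherwise fall back to freeing it. */
static void foo_tx_complete(struct foo_priv *priv, struct sk_buff *skb)
{
        if (skb_recycle_check(skb, priv->rx_buffer_size))
                skb_queue_head(&priv->rx_recycle, skb);   /* cleaned, ready for RX reuse */
        else
                dev_kfree_skb_any(skb);
}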