-rw-r--r--  drivers/net/ethernet/amd/hplance.c                        |  4
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c                        |  6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c                  |  6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c  |  3
-rw-r--r--  drivers/net/xen-netback/netback.c                         | 36
-rw-r--r--  net/core/filter.c                                         |  8
-rw-r--r--  net/ipv6/udp.c                                            |  8
7 files changed, 48 insertions, 23 deletions
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c2c893..6c9de117ffc6 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
 
 #include "hplance.h"
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399dec054..0660ac5846bb 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
 #include <asm/pgtable.h>
 #include <asm/mvme147hw.h>
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
 	       dev->dev_addr);
 
 	lp = netdev_priv(dev);
-	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 16K */
+	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 32K */
 	if (!lp->ram) {
 		printk("%s: No memory for LANCE buffers\n", dev->name);
 		free_netdev(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0cc8b8d205fe..16f46f1224ba 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1384,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	gfar_hw_init(priv);
 
+	/* Carrier starts down, phylib will bring it up */
+	netif_carrier_off(dev);
+
 	err = register_netdev(dev);
 
 	if (err) {
@@ -1391,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
 		goto register_fail;
 	}
 
-	/* Carrier starts down, phylib will bring it up */
-	netif_carrier_off(dev);
-
 	device_init_wakeup(&dev->dev,
 			   priv->device_flags &
 			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 2bdd9deffb38..1659c804f1d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -458,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 	struct qlcnic_cmd_args cmd;
 	int ret = 0;
 
+	memset(&cmd, 0, sizeof(cmd));
 	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
 	if (ret)
 		return ret;
@@ -1463,6 +1464,7 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
 	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
 	int ret;
 
+	memset(&cmd, 0, sizeof(cmd));
 	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
 		return -ENOMEM;
 
@@ -2033,6 +2035,7 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
 	struct qlcnic_cmd_args cmd;
 	int ret;
 
+	memset(&cmd, 0, sizeof(cmd));
 	if (vid == 0)
 		return 0;
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 49efff9b99f4..1844a47636b6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -168,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
  * adding 'size' bytes to a buffer which currently contains 'offset'
  * bytes.
  */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+static bool start_new_rx_buffer(int offset, unsigned long size, int head,
+				bool full_coalesce)
 {
 	/* simple case: we have completely filled the current buffer. */
 	if (offset == MAX_BUFFER_OFFSET)
@@ -180,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 *     (i) this frag would fit completely in the next buffer
 	 * and (ii) there is already some data in the current buffer
 	 * and (iii) this is not the head buffer.
+	 * and (iv) there is no need to fully utilize the buffers
 	 *
 	 * Where:
 	 * - (i) stops us splitting a frag into two copies
@@ -190,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 *   by (ii) but is explicitly checked because
 	 *   netfront relies on the first buffer being
 	 *   non-empty and can crash otherwise.
+	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
+	 *   slot
 	 *
 	 * This means we will effectively linearise small
 	 * frags but do not needlessly split large buffers
@@ -197,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * own buffers as before.
 	 */
 	BUG_ON(size > MAX_BUFFER_OFFSET);
-	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
+	    !full_coalesce)
 		return true;
 
 	return false;
@@ -232,6 +237,13 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 	return meta;
 }
 
+struct xenvif_rx_cb {
+	int meta_slots_used;
+	bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 /*
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
@@ -266,7 +278,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		if (bytes > size)
 			bytes = size;
 
-		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+		if (start_new_rx_buffer(npo->copy_off,
+					bytes,
+					*head,
+					XENVIF_RX_CB(skb)->full_coalesce)) {
 			/*
 			 * Netfront requires there to be some data in the head
 			 * buffer.
@@ -548,12 +563,6 @@ static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
 	}
 }
 
-struct xenvif_rx_cb {
-	int meta_slots_used;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
 void xenvif_kick_thread(struct xenvif_queue *queue)
 {
 	wake_up(&queue->wq);
@@ -609,10 +618,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 		/* To avoid the estimate becoming too pessimal for some
 		 * frontends that limit posted rx requests, cap the estimate
-		 * at MAX_SKB_FRAGS.
+		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
+		 * the skb into the provided slots.
 		 */
-		if (max_slots_needed > MAX_SKB_FRAGS)
+		if (max_slots_needed > MAX_SKB_FRAGS) {
 			max_slots_needed = MAX_SKB_FRAGS;
+			XENVIF_RX_CB(skb)->full_coalesce = true;
+		} else {
+			XENVIF_RX_CB(skb)->full_coalesce = false;
+		}
 
 		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
diff --git a/net/core/filter.c b/net/core/filter.c
index 9de0c25323b4..6bd2e350e751 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -584,7 +584,11 @@ load_byte:
  * to make sure its still a 3bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
  */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX	(7 << 5)
+#else
 #define PKT_TYPE_MAX	7
+#endif
 static unsigned int pkt_type_offset(void)
 {
 	struct sk_buff skb_probe = { .pkt_type = ~0, };
@@ -685,6 +689,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 			return false;
 		insn++;
 		*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
+#ifdef __BIG_ENDIAN_BITFIELD
+		insn++;
+		*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
+#endif
 		break;
 
 	case SKF_AD_OFF + SKF_AD_IFINDEX:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 60325236446a..4180f54a948e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -720,15 +720,15 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 			if (inet->inet_dport != rmt_port)
 				continue;
 		}
-		if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
-		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
+		if (!ipv6_addr_any(&s->sk_v6_daddr) &&
+		    !ipv6_addr_equal(&s->sk_v6_daddr, rmt_addr))
 			continue;
 
 		if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
 			continue;
 
-		if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-			if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
+		if (!ipv6_addr_any(&s->sk_v6_rcv_saddr)) {
+			if (!ipv6_addr_equal(&s->sk_v6_rcv_saddr, loc_addr))
 				continue;
 		}
 		if (!inet6_mc_check(s, loc_addr, rmt_addr))
