author    Bjorn Helgaas <bjorn.helgaas@hp.com>       2010-04-27 16:45:43 -0400
committer Jesse Barnes <jbarnes@virtuousgeek.org>    2010-04-28 12:17:45 -0400
commit    48728e077480910df45baabc5f87b04276348c90 (patch)
tree      8a8114da4840e84ba60050b953123168aefc2dcf /arch/sparc/lib/user_fixup.c
parent    55051feb57eba600b366006757304a0af3ada2bd (diff)
x86/PCI: compute Address Space length rather than using _LEN
ACPI _CRS Address Space Descriptors have _MIN, _MAX, and _LEN.  Linux has
been computing Address Spaces as [_MIN to _MIN + _LEN - 1].  Based on the
tests in the bug reports below, Windows apparently uses [_MIN to _MAX].

Per spec (ACPI 4.0, Table 6-40), for _CRS fixed-size, fixed location
descriptors, "_LEN must be (_MAX - _MIN + 1)", and when that's true, it
doesn't matter which way we compute the end.  But of course, there are
BIOSes that don't follow this rule, and we're better off if Linux handles
those exceptions the same way as Windows.

This patch makes Linux use [_MIN to _MAX], as Windows seems to do.  This
effectively reverts d558b483d5 and 03db42adfe and replaces them with
simpler code.

https://bugzilla.kernel.org/show_bug.cgi?id=14337 (round)
https://bugzilla.kernel.org/show_bug.cgi?id=15480 (truncate)

Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
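To make the difference concrete, here is a small stand-alone sketch (not the
kernel's actual _CRS parsing code; the struct and field names are purely
illustrative) contrasting the end address derived from _LEN with the one taken
directly from _MAX when a BIOS reports an inconsistent descriptor:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for an ACPI Address Space Descriptor */
struct acpi_addr_space {
	uint64_t min;	/* _MIN: start of the window */
	uint64_t max;	/* _MAX: end of the window */
	uint64_t len;	/* _LEN: length of the window */
};

int main(void)
{
	/* Hypothetical buggy descriptor: _LEN != _MAX - _MIN + 1 */
	struct acpi_addr_space desc = {
		.min = 0xd0000000,
		.max = 0xdfffffff,
		.len = 0x0fffffff,	/* one byte short of the true size */
	};

	/* Old behaviour: compute the end from _MIN and _LEN */
	uint64_t end_from_len = desc.min + desc.len - 1;	/* 0xdffffffe */

	/* New behaviour (what Windows appears to do): use _MAX as the end */
	uint64_t end_from_max = desc.max;			/* 0xdfffffff */

	printf("via _LEN: [%#llx-%#llx]\n",
	       (unsigned long long)desc.min, (unsigned long long)end_from_len);
	printf("via _MAX: [%#llx-%#llx]\n",
	       (unsigned long long)desc.min, (unsigned long long)end_from_max);
	return 0;
}

When the descriptor is consistent the two results are identical; the change
only matters for firmware that violates the _LEN rule, where following _MAX
keeps Linux aligned with Windows' interpretation.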
Diffstat (limited to 'arch/sparc/lib/user_fixup.c')
0 files changed, 0 insertions, 0 deletions
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
		p->state == BR_STATE_FORWARDING);
}

static inline unsigned int packet_length(const struct sk_buff *skb)
{
	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}

int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* ip_fragment doesn't copy the MAC header */
	if (nf_bridge_maybe_copy_header(skb) ||
	    (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
		kfree_skb(skb);
	} else {
		skb_push(skb, ETH_HLEN);
		br_drop_fake_rtable(skb);
		dev_queue_xmit(skb);
	}

	return 0;
}

int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}

static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
		struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif